Dataset schema (each record below lists these six fields, in this order):

  code        stringlengths   2 to 1.05M
  repo_name   stringlengths   5 to 104
  path        stringlengths   4 to 251
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int32           2 to 1.05M
# coding: utf-8

from __future__ import division, unicode_literals

"""
This module defines the BorgQueen class, which manages drones to assimilate
data using Python's multiprocessing.
"""

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 18, 2012"

import os
import json
import logging

from monty.io import zopen
from monty.json import MontyEncoder, MontyDecoder

from multiprocessing import Manager, Pool

logger = logging.getLogger("BorgQueen")


class BorgQueen(object):
    """
    The Borg Queen controls the drones to assimilate data in an entire
    directory tree. Uses multiprocessing to speed up things considerably. It
    also contains convenience methods to save and load data between sessions.

    Args:
        drone (Drone): An implementation of
            :class:`pymatgen.apps.borg.hive.AbstractDrone` to use for
            assimilation.
        rootpath (str): The root directory to start assimilation. Leave it
            as None if you want to do assimilation later, or are using the
            BorgQueen to load previously assimilated data.
        number_of_drones (int): Number of drones to parallelize over.
            Typical machines today have up to four processors. Note that you
            won't see a 100% improvement with two drones over one, but you
            will definitely see a significant speedup of at least 50% or so.
            If you are running this over a server with far more processors,
            the speedup will be even greater.
    """

    def __init__(self, drone, rootpath=None, number_of_drones=1):
        self._drone = drone
        self._num_drones = number_of_drones
        self._data = []

        if rootpath:
            if number_of_drones > 1:
                self.parallel_assimilate(rootpath)
            else:
                self.serial_assimilate(rootpath)

    def parallel_assimilate(self, rootpath):
        """
        Assimilate the entire subdirectory structure in rootpath.
        """
        logger.info('Scanning for valid paths...')
        valid_paths = []
        for (parent, subdirs, files) in os.walk(rootpath):
            valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
                                                            files)))
        manager = Manager()
        data = manager.list()
        status = manager.dict()
        status['count'] = 0
        status['total'] = len(valid_paths)
        logger.info('{} valid paths found.'.format(len(valid_paths)))
        p = Pool(self._num_drones)
        p.map(order_assimilation, ((path, self._drone, data, status)
                                   for path in valid_paths))
        for d in data:
            self._data.append(json.loads(d, cls=MontyDecoder))

    def serial_assimilate(self, rootpath):
        """
        Assimilate the entire subdirectory structure in rootpath serially.
        """
        valid_paths = []
        for (parent, subdirs, files) in os.walk(rootpath):
            valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
                                                            files)))
        data = []
        count = 0
        total = len(valid_paths)
        for path in valid_paths:
            newdata = self._drone.assimilate(path)
            self._data.append(newdata)
            count += 1
            logger.info('{}/{} ({:.2f}%) done'.format(count, total,
                                                      count / total * 100))
        for d in data:
            self._data.append(json.loads(d, cls=MontyDecoder))

    def get_data(self):
        """
        Returns a list of assimilated objects.
        """
        return self._data

    def save_data(self, filename):
        """
        Save the assimilated data to a file.

        Args:
            filename (str): filename to save the assimilated data to. Note
                that if the filename ends with gz or bz2, the relevant gzip
                or bz2 compression will be applied.
        """
        with zopen(filename, "wt") as f:
            # dumps + explicit write handles unicode in both Py2 and 3
            # (the stray file argument previously passed to dumps was a bug)
            s = json.dumps(list(self._data), cls=MontyEncoder)
            f.write("%s" % s)

    def load_data(self, filename):
        """
        Load assimilated data from a file.
        """
        with zopen(filename, "rt") as f:
            self._data = json.load(f, cls=MontyDecoder)


def order_assimilation(args):
    """
    Internal helper method for BorgQueen to process assimilation.
    """
    (path, drone, data, status) = args
    newdata = drone.assimilate(path)
    if newdata:
        data.append(json.dumps(newdata, cls=MontyEncoder))
    status['count'] += 1
    count = status['count']
    total = status['total']
    logger.info('{}/{} ({:.2f}%) done'.format(count, total,
                                              count / total * 100))
Dioptas/pymatgen
pymatgen/apps/borg/queen.py
Python
mit
5,010
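A short usage sketch for the BorgQueen API above; VaspToComputedEntryDrone comes from pymatgen's hive module (any AbstractDrone implementation works), and the directory and file names are illustrative only.

# Minimal sketch: assimilate a tree of VASP runs with four drones, then
# reload the results in a later session. Paths here are made up.
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen

drone = VaspToComputedEntryDrone()
# rootpath given, so assimilation starts immediately; 4 worker processes
queen = BorgQueen(drone, rootpath="vasp_runs", number_of_drones=4)
queen.save_data("entries.json.gz")   # .gz suffix makes zopen apply gzip

queen2 = BorgQueen(drone)            # no rootpath: just load saved data
queen2.load_data("entries.json.gz")
entries = queen2.get_data()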
class Node(object):
    """Node class"""

    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def add_next(self, new_next):
        self.next = new_next


def print_list(head):
    end = False
    current = head
    while end is not True:
        print(current.get_data())
        if current.get_next() is None:
            end = True
        else:
            current = current.get_next()


def reverse_list(head):
    prev = None
    current = head
    # was `a.get_next()`, which relied on the global `a`; use the parameter
    next_node = head.get_next()
    while next_node is not None:
        current.add_next(prev)
        prev = current
        current = next_node
        next_node = next_node.get_next()
    if next_node is None:
        current.add_next(prev)


a = Node(1)
b = Node(2)
c = Node(3)
d = Node(4)
a.add_next(b)
b.add_next(c)
c.add_next(d)

print_list(a)
print('reversed list:')
reverse_list(a)
print_list(d)
chandps/Topcoder
sandbox/LinkedList.py
Python
mit
1,015
import sys, os, yaml, glob
import subprocess
import argparse


def main(args):
    workingDir = os.getcwd()
    samples_data_dir = args.sample_data_dir
    assemblies_data_dir = args.assemblies_data_dir
    assemblers = sum(args.assemblers, [])
    for sample_dir_name in [dir for dir in os.listdir(samples_data_dir)
                            if os.path.isdir(os.path.join(samples_data_dir, dir))]:
        validation_folder = os.path.join(os.getcwd(), sample_dir_name)
        if not os.path.exists(validation_folder):
            os.makedirs(validation_folder)
        else:
            print "done ({} folder already present, assumed already run)".format(validation_folder)
        os.chdir(validation_folder)
        sample_YAML_name = os.path.join(validation_folder, "{}_validation.yaml".format(sample_dir_name))
        sample_YAML = open(sample_YAML_name, 'w')
        sample_YAML.write("pipeline:\n")
        sample_YAML.write(" evaluete\n")
        sample_YAML.write("tools:\n")
        sample_YAML.write(" [qaTools, FRC]\n")
        sample_YAML.write("genomeSize: {}\n".format(args.genomeSize))
        sample_YAML.write("output: {}\n".format(sample_dir_name))
        sample_YAML.write("threads: 16\n")
        sample_YAML.write("minCtgLength: 2000\n")
        sample_YAML.write("reference:\n")
        sample_YAML.write("libraries:\n")
        sample_data_dir = os.path.join(samples_data_dir, sample_dir_name)
        assembly_data_dir = os.path.join(assemblies_data_dir, sample_dir_name)
        sample_files = [f for f in os.listdir(sample_data_dir)
                        if os.path.isfile(os.path.join(sample_data_dir, f))]
        pair1_file = ""
        pair2_file = ""
        single = ""
        sample_YAML.write(" lib1:\n")
        for file in sample_files:
            if "_R1_" in file or "_1.fastq.gz" in file:
                if pair1_file:
                    sys.exit("Error: processing sample {} found more than one library/run for read 1".format(sample_dir_name))
                pair1_file = os.path.join(sample_data_dir, file)
                sample_YAML.write(" pair1: {}\n".format(pair1_file))
            elif "_R2_" in file or "_2.fastq.gz" in file:
                if pair2_file:
                    sys.exit("Error: processing sample {} found more than one library/run for read 2".format(sample_dir_name))
                pair2_file = os.path.join(sample_data_dir, file)
                sample_YAML.write(" pair2: {}\n".format(pair2_file))
            elif "merged" in file or "single" in file:
                single = os.path.join(sample_data_dir, file)
        sample_YAML.write(" orientation: {}\n".format(args.orientation))
        sample_YAML.write(" insert: {}\n".format(args.insert))
        sample_YAML.write(" std: {}\n".format(args.std))
        sample_YAML.close()
        command_to_run = ("python ~/DE_NOVO_PIPELINE/de_novo_scilife/utils/run_assembly_evaluation.py "
                          "--global-config {} --sample-config {}_validation.yaml "
                          "--assemblies-dir {} --assembler {} --generatePDF 1").format(
            args.global_config, sample_dir_name, assembly_data_dir, " ".join(assemblers))
        print command_to_run
        subprocess.call(command_to_run, shell=True)
        os.chdir(workingDir)


def submit_job(sample_config, global_config, sample_name):
    workingDir = os.getcwd()
    slurm_file = os.path.join(workingDir, "{}.slurm".format(sample_name))
    slurm_handle = open(slurm_file, "w")
    slurm_handle.write("#!/bin/bash -l\n")
    slurm_handle.write("set -e\n")
    slurm_handle.write("#SBATCH -A a2010002\n")
    slurm_handle.write("#SBATCH -o {}_QC.out\n".format(sample_name))
    slurm_handle.write("#SBATCH -e {}_QC.err\n".format(sample_name))
    slurm_handle.write("#SBATCH -J {}_QC.job\n".format(sample_name))
    slurm_handle.write("#SBATCH -p node -n 8\n")
    slurm_handle.write("#SBATCH -t 05:00:00\n")
    slurm_handle.write("#SBATCH --mail-user [email protected]\n")
    slurm_handle.write("#SBATCH --mail-type=ALL\n")
    slurm_handle.write("\n\n")
    slurm_handle.write("module load abyss/1.3.5\n")
    slurm_handle.write("python ~/assembly_pipeline/de_novo_scilife/script/deNovo_pipeline.py --global-config {} --sample-config {}\n\n".format(global_config, sample_config))
    slurm_handle.close()
    command = ("sbatch", slurm_file)
    print command
    subprocess.call(command)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--sample-data-dir', help="full path to directory containing one folder per sample. Each sample contains only one library (i.e., one PE lib)", type=str)
    parser.add_argument('--assemblies-data-dir', help="full path to directory containing, for each sample, the de novo assemblies", type=str)
    parser.add_argument('--assemblers', action='append', nargs='+', help="list of assemblers to be evaluated")
    parser.add_argument('--genomeSize', help="genome size", type=str)
    parser.add_argument('--orientation', help="I assume I am working only with PE (if not, manual editing is needed)", type=str)
    parser.add_argument('--insert', help="I assume that all samples have the same insert (if not, manual editing is needed)", type=str)
    parser.add_argument('--std', help="I assume that all samples have the same std (if not, manual editing is needed)", type=str)
    parser.add_argument('--global-config', help="global configuration file")
    args = parser.parse_args()
    main(args)
senthil10/NouGAT
utils/prepare_validation_config.py
Python
mit
5,461
# Generated by Haxe 4.0.5
# coding: utf-8

import sys


class Main:
    __slots__ = ()

    @staticmethod
    def main():
        print("Hello World")


class python_internal_MethodClosure:
    __slots__ = ("obj", "func")

    def __init__(self, obj, func):
        self.obj = obj
        self.func = func

    def __call__(self, *args):
        return self.func(self.obj, *args)


Main.main()
sebgod/linguist
test/fixtures/Generated/Haxe/main.py
Python
mit
392
"""Descriptor - Provide useful descriptions for common file types. Refer to https://mxr.mozilla.org/webtools-central/source/mxr/Local.pm#27 """ from itertools import ifilter import re from os import listdir from os.path import splitext, basename, join, isfile import dxr.indexers def is_readme(filename): """Return whether filename is probably a readme.""" return filename.lower() in {'readme', 'readme.md', 'readme.txt'} def describe_readme(lines): """Return a string that represents a description for the given lines of a presumed readme file or None if it can extract no suitable description.""" # For now the heuristic is just the first non-empty line. return next(ifilter(None, (line.strip() for line in lines)), None) class FolderToIndex(dxr.indexers.FolderToIndex): browse_headers = ['Description'] def needles(self): """If the folder contains a readme, then yield the first line of the readme as the description. Similar to https://mxr.mozilla.org/webtools-central/source/mxr/Local.pm#251. """ # listdir() returns unicode iff a unicode path is passed in. self.path # is a bytestring. for entry in sorted(listdir(self.path)): path = join(self.path, entry) # If we find a readme, then open it and return the first line if # it's non-empty. if is_readme(entry) and isfile(path): with open(path) as readme: try: first_line = readme.readline(100).decode(self.tree.source_encoding) except UnicodeDecodeError: continue description = describe_readme([first_line]) if description: # Pack into a list for consistency with the file needle. return [('Description', [description])] # Didn't find anything to use as a description return [] class FileToIndex(dxr.indexers.FileToIndex): """Do lots of work to yield a description needle.""" # comment_re matches C-style block comments: comment_re = re.compile(r'^(/\*[*\s]*)(?P<description>(\*(?!/)|[^*])*)\*/', flags=re.M) docstring_res = [re.compile(r'"""\s*(?P<description>[^"]*)"""', flags=re.M), re.compile(r"'''\s*(?P<description>[^']*)'''", flags=re.M)] title_re = re.compile(r'<title>([^<]*)</title>') def __init__(self, path, contents, plugin_name, tree): super(FileToIndex, self).__init__(path, contents, plugin_name, tree) self._sixty_lines = None @property def sixty_lines(self): if self._sixty_lines is None: try: self._sixty_lines = self.contents[:self.char_offset(60, 1)].splitlines(True) except IndexError: # Then there are less than 60 lines total, just split what we have. self._sixty_lines = self.contents.splitlines(True) return self._sixty_lines def needles(self): if self.contains_text(): extension = splitext(self.path)[1] description = None if extension: try: # Find the describer method, skipping the dot on extension. describer = getattr(self, 'describe_' + extension[1:]) except AttributeError: # Don't have a descriptor function for this file type, we can try generic later. 
pass else: description = describer() if not description: description = self.generic_describe() if description: yield 'Description', description[:100].strip() def describe_html(self): """Return the contents of the <title> tag.""" match = self.title_re.search(self.contents) if match: return match.group(1) def describe_py(self): """Return the contents of the first line of the first docstring if there is one in the first 60 lines.""" joined_lines = ''.join(self.sixty_lines) for docstring_re in self.docstring_res: match = docstring_re.search(joined_lines) if match: return match.group('description') def generic_describe(self): """Look at the first 60 lines for a match for {{self.path|description}} [delimiter] text, and return the first text we find. Unless it's a readme, then return the first line.""" filename = basename(self.path) if is_readme(filename): possible_description = describe_readme(self.sixty_lines) if possible_description: return possible_description # Not a readme file, try to match the filename: description pattern. root, ext = splitext(filename) delimiters = ':,-' try: description_re = re.compile(ur'(?:{}|{}|description)' '(?:{})?\s*(?:[{}]\n?)\s*' '(?P<description>[\w\s-]+)'.format( re.escape(self.path.decode('utf-8')), re.escape(root.decode('utf-8')), re.escape(ext.decode('utf-8')), delimiters), re.IGNORECASE | re.UNICODE) except UnicodeDecodeError: # We couldn't make Unicode sense of the bag-of-bytes filename. pass else: for line in self.sixty_lines: match = description_re.search(line) if match: return match.group('description') # Haven't returned so we can fall back to the first non-empty line of # the first doc-comment. for match in self.comment_re.finditer(''.join(self.sixty_lines)): desc = match.group('description').strip() desc_lower = desc.lower() # Skip any comment that contains the license or a tab-width # emacs/vim setting. if not any(pattern in desc_lower for pattern in ['tab-width', 'license', 'vim:']): return desc
pelmers/dxr
dxr/plugins/descriptor/__init__.py
Python
mit
6,361
#!/usr/bin/env python
# encoding: utf-8

################################################################################
#
#   RMG - Reaction Mechanism Generator
#
#   Copyright (c) 2009-2011 by the RMG Team ([email protected])
#
#   Permission is hereby granted, free of charge, to any person obtaining a
#   copy of this software and associated documentation files (the 'Software'),
#   to deal in the Software without restriction, including without limitation
#   the rights to use, copy, modify, merge, publish, distribute, sublicense,
#   and/or sell copies of the Software, and to permit persons to whom the
#   Software is furnished to do so, subject to the following conditions:
#
#   The above copyright notice and this permission notice shall be included in
#   all copies or substantial portions of the Software.
#
#   THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#   DEALINGS IN THE SOFTWARE.
#
################################################################################

import os.path
import shutil


class Subject(object):
    """Subject in Observer Pattern"""

    def __init__(self):
        self._observers = []

    """
    Call this method when your (self-implemented) observer class should start
    listening to the Subject class.

    e.g.:
        listener = YourOwnListener()
        subject.attach(listener)
    """
    def attach(self, observer):
        if not observer in self._observers:
            self._observers.append(observer)

    """
    Call this method when your (self-implemented) observer class should stop
    listening to the Subject class.

    e.g.:
        listener = YourOwnListener()
        subject.attach(listener)
        ...<do some work>...
        subject.detach(listener)
    """
    def detach(self, observer):
        try:
            self._observers.remove(observer)
        except ValueError:
            pass

    """
    Call this method in classes that implement Subject, when the data that
    you're interested in is available.

    e.g.:
        class YourClass(Subject):
            ...
            def simulate(...):
                <stuff is being done>
                self.notify()
                <continue doing other stuff>

    Make sure that your listener class implements the update(subject) method!

    e.g.:
        class YourOwnListener(object):
            def __init__(self):
                self.data = []

            def update(self, subject):
                self.data.append(subject.data)
    """
    def notify(self, modifier=None):
        for observer in self._observers:
            if modifier != observer:
                observer.update(self)


def makeOutputSubdirectory(outputDirectory, folder):
    """
    Create a subdirectory `folder` in the output directory. If the folder
    already exists (e.g. from a previous job) its contents are deleted.
    """
    dir = os.path.join(outputDirectory, folder)
    if os.path.exists(dir):
        # The directory already exists, so delete it (and all its content!)
        shutil.rmtree(dir)
    os.mkdir(dir)
chatelak/RMG-Py
rmgpy/util.py
Python
mit
3,431
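The Subject docstrings above describe the intended call pattern; here is a runnable sketch of it (YourOwnListener and the data attribute are the hypothetical names used in those docstrings, not part of the module):

# Observer-pattern sketch following the docstrings above.
class YourOwnListener(object):
    def __init__(self):
        self.data = []

    def update(self, subject):
        self.data.append(subject.data)

subject = Subject()
subject.data = 42               # illustrative payload; Subject itself defines none
listener = YourOwnListener()
subject.attach(listener)
subject.notify()                # calls listener.update(subject)
assert listener.data == [42]
subject.detach(listener)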
class Solution:
    # @param A, a list of integers
    # @return an integer
    def maxSubArray(self, A):
        miniPrefix = 0
        maxiSubArraySum = A[0]
        sum = 0
        for i in A:
            sum += i
            maxiSubArraySum = max(maxiSubArraySum, sum - miniPrefix)
            miniPrefix = min(miniPrefix, sum)
        return maxiSubArraySum
happylixue/LeetCodeSol
problems/maximum-subarray/sol.py
Python
mit
385
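The solution above is Kadane's algorithm phrased through prefix sums: the best subarray ending at index i has sum equal to the current prefix sum minus the smallest prefix sum seen before i. A quick hedged check (the test arrays are mine, not part of the original file):

# Sanity check for the prefix-sum formulation above.
s = Solution()
assert s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
assert s.maxSubArray([-3, -1, -2]) == -1                    # all-negative input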
from django.conf.urls.defaults import *

from account.forms import *

urlpatterns = patterns('',
    url(r'^login/$', 'account.views.login', name="acct_login"),
    url(r'^password_change/$', 'account.views.password_change', name="acct_passwd"),
    url(r'^password_reset/$', 'account.views.password_reset', name="acct_passwd_reset"),
    url(r'^logout/$', 'django.contrib.auth.views.logout',
        {"template_name": "account/logout.html"}, name="acct_logout"),
)
bhaugen/localecon
account/urls.py
Python
mit
458
from pyquark.nxt.color_sensor import ColorSensor
import logging
import time
from pyquark.io.i2c import I2c

if __name__ == '__main__':
    log = logging.getLogger('pyquark')
    log.setLevel(logging.INFO)

    i2c = I2c()
    bus = i2c.smbus
    bus.write_byte_data(0x20, 0x29, 0x04)

    color_sensor = ColorSensor()
    while True:
        print color_sensor.aio.value()
        #color_sensor.i2c.smbus.write_byte(0x41, 0x00)
        #color_sensor.color()
        time.sleep(1)
rli9/pygalileo
example/color_sensor.py
Python
mit
483
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################

from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks

from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner


class Component(ApplicationSession):
    """
    An application component that subscribes and receives events. After
    receiving 5 events, it unsubscribes, sleeps and then resubscribes for
    another run. Then it stops.
    """

    @inlineCallbacks
    def test(self):
        self.received = 0
        self.sub = yield self.subscribe(self.on_event, 'com.myapp.topic1')
        print("Subscribed with subscription ID {}".format(self.sub.id))

    @inlineCallbacks
    def on_event(self, i):
        print("Got event: {}".format(i))
        self.received += 1
        if self.received > 5:
            self.runs += 1
            if self.runs > 1:
                self.leave()
            else:
                yield self.sub.unsubscribe()
                print("Unsubscribed .. continue in 5s ..")
                reactor.callLater(5, self.test)

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")
        self.runs = 0
        yield self.test()

    def onDisconnect(self):
        print("disconnected")
        reactor.stop()


if __name__ == '__main__':
    url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
    realm = "crossbardemo"
    runner = ApplicationRunner(url, realm)
    runner.run(Component)
oberstet/autobahn-python
examples/twisted/wamp/pubsub/unsubscribe/frontend.py
Python
mit
2,743
from .account.views import account, github_bp
from .content import content
from .matrix.views import matrix
from .members.views import members
from .projects.views import projects

blueprints = [account, content, github_bp, matrix, members, projects]


def init_app(app):
    for blueprint in blueprints:
        app.register_blueprint(blueprint)
jazzband/site
jazzband/blueprints.py
Python
mit
347
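init_app above follows Flask's application-factory pattern; a sketch of how a factory might call it (the create_app function and the jazzband import path are assumptions based on this repo's layout, not code from the source):

from flask import Flask
from jazzband import blueprints  # hypothetical import path for the module above

def create_app():
    app = Flask(__name__)
    blueprints.init_app(app)  # registers every blueprint in one pass
    return app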
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import pytest
from lxml import etree

from interpparser import gpo_cfr
from regparser.test_utils.xml_builder import XMLBuilder
from regparser.tree.xml_parser import tree_utils


def test_interpretation_markers():
    text = '1. Kiwis and Mangos'
    assert gpo_cfr.get_first_interp_marker(text) == '1'


def test_interpretation_markers_roman():
    text = 'iv. Kiwis and Mangos'
    assert gpo_cfr.get_first_interp_marker(text) == 'iv'


def test_interpretation_markers_emph():
    text = '<E T="03">1.</E> Kiwis and Mangos'
    assert gpo_cfr.get_first_interp_marker(text) == '<E T="03">1</E>'

    text = '<E T="03">1. Kiwis and Mangos.</E> More content.'
    assert gpo_cfr.get_first_interp_marker(text) == '<E T="03">1</E>'


def test_interpretation_markers_none():
    text = '(iv) Kiwis and Mangos'
    assert gpo_cfr.get_first_interp_marker(text) is None


def test_interpretation_markers_stars_no_period():
    for marker in ('4 ', 'iv ', 'A\t'):
        text = marker + '* * *'
        assert gpo_cfr.get_first_interp_marker(text) == marker.strip()

    text = "33 * * * Some more stuff"
    assert gpo_cfr.get_first_interp_marker(text) is None


def test_build_supplement_tree():
    """Integration test"""
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Section 737.5 NASCAR", SOURCE='HD2')
        ctx.P("1. Paragraph 1")
        ctx.P("i. Paragraph i; A. Start of A")
        ctx.HD("5(a) Access Device", SOURCE='HD2')
        ctx.P("1. Paragraph 111")
        ctx.P("i. Content content")
        ctx.P("ii. More content")
        ctx.P("A. Aaaaah")
        ctx.child_from_string('<P><E T="03">1.</E> More info</P>')
        ctx.child_from_string('<P><E T="03">2.</E> Second info</P>')
        ctx.child_from_string('<P><E T="03">3. Keyterms</E></P>')
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 1

    i5 = tree.children[0]
    assert i5.label == ['737', '5', 'Interp']
    assert len(i5.children) == 2

    i51, i5a = i5.children
    assert i51.label == ['737', '5', 'Interp', '1']
    assert len(i51.children) == 1
    i51i = i51.children[0]
    assert i51i.label == ['737', '5', 'Interp', '1', 'i']
    assert len(i51i.children) == 1
    i51ia = i51i.children[0]
    assert i51ia.label == ['737', '5', 'Interp', '1', 'i', 'A']
    assert i51ia.children == []

    assert i5a.label == ['737', '5', 'a', 'Interp']
    assert len(i5a.children) == 1
    i5a1 = i5a.children[0]
    assert i5a1.label == ['737', '5', 'a', 'Interp', '1']
    assert len(i5a1.children) == 2
    i5a1i, i5a1ii = i5a1.children
    assert i5a1i.label == ['737', '5', 'a', 'Interp', '1', 'i']
    assert i5a1i.children == []

    assert i5a1ii.label == ['737', '5', 'a', 'Interp', '1', 'ii']
    assert len(i5a1ii.children) == 1
    i5a1iia = i5a1ii.children[0]
    assert i5a1iia.label == ['737', '5', 'a', 'Interp', '1', 'ii', 'A']
    assert len(i5a1iia.children) == 3
    i5a1iia1, i5a1iia2, i5a1iia3 = i5a1iia.children
    assert i5a1iia1.label == ['737', '5', 'a', 'Interp', '1', 'ii', 'A', '1']
    assert i5a1iia1.tagged_text == '<E T="03">1.</E> More info'
    assert i5a1iia1.children == []
    assert i5a1iia2.label == ['737', '5', 'a', 'Interp', '1', 'ii', 'A', '2']
    assert i5a1iia2.tagged_text == '<E T="03">2.</E> Second info'
    assert i5a1iia2.children == []
    assert i5a1iia3.label == ['737', '5', 'a', 'Interp', '1', 'ii', 'A', '3']
    assert i5a1iia3.tagged_text == '<E T="03">3. Keyterms</E>'
    assert i5a1iia3.children == []


def test_build_supplement_tree_spacing():
    """Integration test"""
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Section 737.5 NASCAR", SOURCE='HD2')
        ctx.child_from_string('<P>1.<E T="03">Phrase</E>. More Content</P>')
        ctx.child_from_string('<P>i. I like<PRTPAGE />ice cream</P>')
        ctx.P("A. Aaaaah")
        ctx.child_from_string('<P><E T="03">1.</E>More info</P>')
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 1

    s5 = tree.children[0]
    assert len(s5.children) == 1

    s51 = s5.children[0]
    assert s51.text == "1. Phrase. More Content"
    assert len(s51.children) == 1

    s51i = s51.children[0]
    assert s51i.text == "i. I like ice cream"
    assert len(s51i.children) == 1

    s51ia = s51i.children[0]
    assert s51ia.text == "A. Aaaaah"
    assert len(s51ia.children) == 1

    s51ia1 = s51ia.children[0]
    assert s51ia1.text == "1. More info"
    assert s51ia1.children == []


def test_build_supplement_tree_repeats():
    """Integration test"""
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Appendices G and H-Content", SOURCE='HD2')
        ctx.P("1. G:H")
        ctx.HD("Appendix G", SOURCE='HD2')
        ctx.P("1. G")
        ctx.HD("Appendix H", SOURCE='HD2')
        ctx.P("1. H")
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 3

    aGH, aG, aH = tree.children
    assert aGH.label == ['737', 'G_H', 'Interp']
    assert aG.label == ['737', 'G', 'Interp']
    assert aH.label == ['737', 'H', 'Interp']


def test_build_supplement_tree_skip_levels():
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Section 737.5 NASCAR", SOURCE='HD2')
        ctx.HD("5(a)(1)(i) Access Device", SOURCE='HD2')
        ctx.P("1. Paragraph 111")
        ctx.HD("5(b) Other Devices", SOURCE='HD2')
        ctx.P("1. Paragraph 222")
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 1

    i5 = tree.children[0]
    assert i5.label == ['737', '5', 'Interp']
    assert len(i5.children) == 2

    i5a, i5b = i5.children
    assert i5a.label == ['737', '5', 'a', 'Interp']
    assert len(i5a.children) == 1
    i5a1 = i5a.children[0]
    assert i5a1.label == ['737', '5', 'a', '1', 'Interp']
    assert len(i5a1.children) == 1
    i5a1i = i5a1.children[0]
    assert i5a1i.label == ['737', '5', 'a', '1', 'i', 'Interp']
    assert len(i5a1i.children) == 1

    assert i5b.label == ['737', '5', 'b', 'Interp']
    assert len(i5b.children) == 1


def test_build_supplement_tree_appendix_paragraphs():
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Appendix H", SOURCE='HD2')
        ctx.HD("(b) bbbbbbb", SOURCE='HD3')
        ctx.P("1. Paragraph b")
        ctx.HD("(b)(5) b5b5b5", SOURCE='HD3')
        ctx.P("1. Paragraph b5")
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 1

    ih = tree.children[0]
    assert ih.label == ['737', 'H', 'Interp']
    assert len(ih.children) == 1

    ihb = ih.children[0]
    assert ihb.label == ['737', 'H', 'b', 'Interp']
    assert len(ihb.children) == 2

    ihb1, ihb5 = ihb.children
    assert ihb1.label == ['737', 'H', 'b', 'Interp', '1']
    assert ihb5.label == ['737', 'H', 'b', '5', 'Interp']


def test_build_supplement_intro_section():
    """Integration test"""
    with XMLBuilder('APPENDIX') as ctx:
        ctx.HD("Supplement I to Part 737-Official Interpretations",
               SOURCE='HED')
        ctx.HD("Introduction", SOURCE='HD1')
        ctx.P("1. Some content. (a) Badly named")
        ctx.P("(b) Badly named")
        ctx.HD("Subpart A", SOURCE='HD1')
        ctx.HD("Section 737.13", SOURCE='HD2')
        ctx.child_from_string("<P><E>13(a) Some Stuff!</E></P>")
        ctx.P("1. 131313")
        ctx.HD("Appendix G", SOURCE='HD2')
        ctx.P("1. G")
    tree = gpo_cfr.build_supplement_tree('737', ctx.xml)
    assert tree.label == ['737', 'Interp']
    assert len(tree.children) == 3

    h1, s13, g = tree.children
    assert h1.label == ['737', 'Interp', 'h1']
    assert s13.label == ['737', '13', 'Interp']
    assert g.label == ['737', 'G', 'Interp']

    assert len(h1.children) == 1
    assert h1.children[0].text == ('1. Some content. (a) Badly named\n\n'
                                   '(b) Badly named')
    assert h1.children[0].children == []

    assert len(s13.children) == 1
    assert s13.children[0].title == '13(a) Some Stuff!'


def test_process_inner_child():
    with XMLBuilder('ROOT') as ctx:
        ctx.HD("Title")
        ctx.P("1. 111. i. iii")
        ctx.STARS()
        ctx.P("A. AAA")
        ctx.child_from_string('<P><E T="03">1.</E> eee</P>')
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    n1 = stack.m_stack[0][0][1]
    assert n1.label == ['1']
    assert len(n1.children) == 1

    n1i = n1.children[0]
    assert n1i.label == ['1', 'i']
    assert n1i.text == 'i. iii'
    assert len(n1i.children) == 1

    n1ia = n1i.children[0]
    assert n1ia.label == ['1', 'i', 'A']
    assert len(n1ia.children) == 1

    n1ia1 = n1ia.children[0]
    assert n1ia1.label == ['1', 'i', 'A', '1']
    assert n1ia1.children == []


def test_process_inner_child_space():
    with XMLBuilder('ROOT') as ctx:
        ctx.HD("Title")
        ctx.P("1. 111")
        ctx.P("i. See country A. Not that country")
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    n1 = stack.m_stack[0][0][1]
    assert n1.label == ['1']
    assert len(n1.children) == 1

    n1i = n1.children[0]
    assert n1i.label == ['1', 'i']
    assert n1i.children == []


def test_process_inner_child_incorrect_xml():
    with XMLBuilder('ROOT') as ctx:
        ctx.HD("Title")
        ctx.child_from_string('<P><E T="03">1.</E> 111</P>')
        ctx.P("i. iii")
        ctx.child_from_string('<P><E T="03">2.</E> 222 Incorrect Content</P>')
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()
    assert len(stack.m_stack[0]) == 2


def test_process_inner_child_no_marker():
    with XMLBuilder() as ctx:
        ctx.HD("Title")
        ctx.P("1. 111")
        ctx.P("i. iii")
        ctx.P("Howdy Howdy")
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    i1 = stack.m_stack[0][0][1]
    assert len(i1.children) == 1
    i1i = i1.children[0]
    assert i1i.children == []
    assert i1i.text == "i. iii\n\nHowdy Howdy"


def test_process_inner_child_has_citation():
    with XMLBuilder() as ctx:
        ctx.HD("Title")
        ctx.P("1. Something something see comment 22(a)-2.i. please")
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    tree = stack.m_stack[0][0][1]
    assert tree.children == []


def test_process_inner_child_stars_and_inline():
    with XMLBuilder() as ctx:
        ctx.HD("Title")
        ctx.STARS()
        ctx.P("2. Content. * * *")
        ctx.STARS()
        ctx.P("xi. Content")
        ctx.STARS()
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    tree = stack.m_stack[0][0][1]
    assert tree.label == ['2']
    assert len(tree.children) == 1
    assert tree.children[0].label == ['2', 'xi']
    assert tree.children[0].children == []


def test_process_inner_child_collapsed_i():
    with XMLBuilder() as ctx:
        ctx.HD("Title")
        ctx.child_from_string(
            '<P>1. <E T="03">Keyterm text</E> i. Content content</P>')
        ctx.P("ii. Other stuff")
    node = ctx.xml.xpath('//HD')[0]
    stack = tree_utils.NodeStack()
    gpo_cfr.process_inner_children(stack, node)
    while stack.size() > 1:
        stack.unwind()

    tree = stack.m_stack[0][0][1]
    assert tree.label == ['1']
    assert len(tree.children) == 2
    assert tree.children[0].label == ['1', 'i']
    assert tree.children[0].children == []
    assert tree.children[1].label == ['1', 'ii']
    assert tree.children[1].children == []


@pytest.mark.parametrize('title', [
    "<HD SOURCE='HD1'>Some Title</HD>",
    "<HD SOURCE='HD2'>Some Title</HD>",
    "<P><E T='03'>Section 111.22</E></P>",
    "<P><E T='03'>21(b) Contents</E>.</P>",
    "<P>31(r) Contents.</P>",
    "<P>Section 111.31 Contents.</P>",
    "<P>Paragraph 51(b)(1)(i).</P>",
])
def test_is_title_success(title):
    assert gpo_cfr.is_title(etree.fromstring(title))


@pytest.mark.parametrize('title', [
    "<HD SOURCE='HED'>Some Header</HD>",
    "<IMG>Some Image</IMG>",
    "<P>Then Section 22.111</P>",
    "<P><E T='03'>Section 222.33</E> More text</P>",
    "<P><E T='03'>Keyterm.</E> More text</P>",
])
def test_is_title_fail(title):
    assert not gpo_cfr.is_title(etree.fromstring(title))


def test_collapsed_markers_matches():
    assert ['i'] == [m.group(1) for m in gpo_cfr.collapsed_markers_matches(
        '1. AAA - i. More', '1. AAA - i. More')]
    assert ['1'] == [m.group(1) for m in gpo_cfr.collapsed_markers_matches(
        'A. AAA: 1. More', 'A. AAA: <E T="03">1</E>. More')]

    for txt in ("1. Content - i.e. More content",
                "1. Stuff in quotes like, “N.A.”",
                "i. References appendix D, part I.A.1. Stuff",
                "A. AAA - 1. More, without tags"):
        assert gpo_cfr.collapsed_markers_matches(txt, txt) == []
tadhg-ohiggins/regulations-parser
tests/interpparser/gpo_cfr_tests.py
Python
cc0-1.0
14,139
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blocks', '0012_auto_20160428_1303'),
        ('tasks', '0008_taskmodel__contained_concepts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='taskmodel',
            name='level',
        ),
        migrations.AddField(
            model_name='taskmodel',
            name='toolbox',
            field=models.ForeignKey(help_text='minimal toolbox required to solve this task',
                                    default=None, to='blocks.Toolbox', null=True),
        ),
    ]
effa/flocs
tasks/migrations/0009_auto_20160428_1329.py
Python
gpl-2.0
654
import unittest

from PyFoam.Execution.StepAnalyzedWatcher import StepAnalyzedWatcher

theSuite = unittest.TestSuite()
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
unittests/Execution/test_StepAnalyzedWatcher.py
Python
gpl-2.0
117
import unittest
import ctypes
from ctypes.test import need_symbol

import _ctypes_test


@need_symbol('c_wchar')
class UnicodeTestCase(unittest.TestCase):
    def test_wcslen(self):
        dll = ctypes.CDLL(_ctypes_test.__file__)
        wcslen = dll.my_wcslen
        wcslen.argtypes = [ctypes.c_wchar_p]

        self.assertEqual(wcslen("abc"), 3)
        self.assertEqual(wcslen("ab\u2070"), 3)
        self.assertRaises(ctypes.ArgumentError, wcslen, b"ab\xe4")

    def test_buffers(self):
        buf = ctypes.create_unicode_buffer("abc")
        self.assertEqual(len(buf), 3+1)

        buf = ctypes.create_unicode_buffer("ab\xe4\xf6\xfc")
        self.assertEqual(buf[:], "ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::], "ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::-1], '\x00\xfc\xf6\xe4ba')
        self.assertEqual(buf[::2], 'a\xe4\xfc')
        self.assertEqual(buf[6:5:-1], "")

    def test_embedded_null(self):
        class TestStruct(ctypes.Structure):
            _fields_ = [("unicode", ctypes.c_wchar_p)]
        t = TestStruct()
        # This would raise a ValueError:
        t.unicode = "foo\0bar\0\0"


func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p


class StringTestCase(UnicodeTestCase):
    def setUp(self):
        func.argtypes = [ctypes.c_char_p]
        func.restype = ctypes.c_char_p

    def tearDown(self):
        func.argtypes = None
        func.restype = ctypes.c_int

    def test_func(self):
        self.assertEqual(func(b"abc\xe4"), b"abc\xe4")

    def test_buffers(self):
        buf = ctypes.create_string_buffer(b"abc")
        self.assertEqual(len(buf), 3+1)

        buf = ctypes.create_string_buffer(b"ab\xe4\xf6\xfc")
        self.assertEqual(buf[:], b"ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::], b"ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::-1], b'\x00\xfc\xf6\xe4ba')
        self.assertEqual(buf[::2], b'a\xe4\xfc')
        self.assertEqual(buf[6:5:-1], b"")


if __name__ == '__main__':
    unittest.main()
bruderstein/PythonScript
PythonLib/full/ctypes/test/test_unicode.py
Python
gpl-2.0
1,997
""" This file is part of exparser. exparser is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. exparser is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with exparser. If not, see <http://www.gnu.org/licenses/>. """ fontFamily = 'Liberation Sans' fontSize = 9 palette = ['#edd400', '#c17d11', '#75507b', '#cc0000', '#73d216', \ '#f57900', '#3465a4'] plotLineColors = palette * 100 plotLineSymbols = ['o-'] * 100 plotLineWidth = 1 plotLineStyles = ['-'] * 100 capSize = 0 strDType = '|S128'
lvanderlinden/exparser
exparser/Constants.py
Python
gpl-2.0
916
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.config import cConfig
from resources.hosters.hoster import iHoster
import re


class cHoster(iHoster):

    def __init__(self):
        self.__sDisplayName = 'Vodlocker'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''

    def getDisplayName(self):
        return self.__sDisplayName

    def setDisplayName(self, sDisplayName):
        self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR] [COLOR khaki]' + self.__sHD + '[/COLOR]'

    def setFileName(self, sFileName):
        self.__sFileName = sFileName

    def getFileName(self):
        return self.__sFileName

    def getPluginIdentifier(self):
        return 'vodlocker'

    def setHD(self, sHD):
        self.__sHD = ''

    def getHD(self):
        return self.__sHD

    def isDownloadable(self):
        return True

    def isJDownloaderable(self):
        return True

    def getPattern(self):
        return ''

    def __getIdFromUrl(self, sUrl):
        sPattern = "http://vodlocker.com/([^<]+)"
        oParser = cParser()
        aResult = oParser.parse(sUrl, sPattern)
        if (aResult[0] == True):
            return aResult[1][0]
        return ''

    def setUrl(self, sUrl):
        self.__sUrl = str(sUrl)

    def checkUrl(self, sUrl):
        return True

    def getUrl(self):
        return self.__sUrl

    def getMediaLink(self):
        return self.__getMediaLinkForGuest()

    def __getMediaLinkForGuest(self):
        oRequest = cRequestHandler(self.__sUrl)
        sHtmlContent = oRequest.request()
        sPattern = 'file: "([^"]+)"'
        oParser = cParser()
        sHtmlContent = sHtmlContent.replace('|', '/')
        aResult = oParser.parse(sHtmlContent, sPattern)
        if (aResult[0] == True):
            api_call = aResult[1][0]
            return True, api_call
        return False, False
mino60/venom-xbmc-addons-beta
plugin.video.vstream/resources/hosters/vodlocker.py
Python
gpl-2.0
2,050
# coding:utf-8

import gevent
from gevent import (monkey, queue, event, pool)

import re
import sys
import logging
import unittest
import urllib
import urlparse
import requests
from threading import Timer
from pyquery import PyQuery

from utils import HtmlAnalyzer, UrlFilter


__all__ = ['Strategy', 'UrlObj', 'Spider', 'HtmlAnalyzer', 'UrlFilter']


class Strategy(object):

    default_cookies = {}

    default_headers = {
        'User-Agent': 'SinaSec Webscan Spider',
        'Accept': 'Accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Cache-Control': 'max-age=0',
        'Accept-Charset': 'GBK,utf-8;q=0.7,*;q=0.3',
    }

    def __init__(self, max_depth=5, max_count=5000, concurrency=5, timeout=10,
                 time=6 * 3600, headers=None, cookies=None, ssl_verify=False,
                 same_host=False, same_domain=True):
        self.max_depth = max_depth
        self.max_count = max_count
        self.concurrency = concurrency
        self.timeout = timeout
        self.time = time
        self.headers = self.default_headers
        self.headers.update(headers or {})
        self.cookies = self.default_cookies
        self.cookies.update(cookies or {})
        self.ssl_verify = ssl_verify
        self.same_host = same_host
        self.same_domain = same_domain


class UrlObj(object):

    def __init__(self, url, depth=0, linkin=None):
        if not url.startswith("http"):
            url = "http://" + url
        self.url = url.strip('/')
        self.depth = depth
        self.linkin = linkin

    def __str__(self):
        return self.url

    def __repr__(self):
        return "<Url object: %s>" % self.url

    def __hash__(self):
        return hash(self.url)

    def setLinkin(self, urlobj):
        self.linkin = urlobj

    def incrDepth(self):
        self.depth += 1


class UrlTable(object):

    infinite = float("inf")

    def __init__(self, size=0):
        self.__urls = {}
        if size == 0:
            size = self.infinite
        self.size = size

    def __len__(self):
        return len(self.__urls)

    def __contains__(self, url):
        return hash(url) in self.__urls.keys()

    def __iter__(self):
        for url in self.urls:
            yield url

    def insert(self, url):
        if isinstance(url, basestring):
            url = UrlObj(url)
        if url not in self:
            self.__urls.setdefault(hash(url), url)

    @property
    def urls(self):
        return self.__urls.values()

    def full(self):
        return len(self) >= self.size


class Spider(object):

    logger = logging.getLogger("spider.mainthread")

    def __init__(self, strategy=Strategy()):
        monkey.patch_all()
        self.strategy = strategy
        self.queue = queue.Queue()
        self.urltable = UrlTable(strategy.max_count)
        self.pool = pool.Pool(strategy.concurrency)
        self.greenlet_finished = event.Event()
        self._stop = event.Event()

    def setRootUrl(self, url):
        if isinstance(url, basestring):
            url = UrlObj(url)
        self.root = url
        self.put(self.root)

    def put(self, url):
        if url not in self.urltable:
            self.queue.put(url)

    def run(self):
        self.timer = Timer(self.strategy.time, self.stop)
        self.timer.start()
        self.logger.info("spider '%s' begin running", self.root)

        while not self.stopped() and self.timer.isAlive():
            for greenlet in list(self.pool):
                if greenlet.dead:
                    self.pool.discard(greenlet)
            try:
                url = self.queue.get_nowait()
            except queue.Empty:
                if self.pool.free_count() != self.pool.size:
                    self.greenlet_finished.wait()
                    self.greenlet_finished.clear()
                    continue
                else:
                    self.stop()
            greenlet = Handler(url, self)
            self.pool.start(greenlet)

    def stopped(self):
        return self._stop.is_set()

    def stop(self):
        self.logger.info("spider '%s' finished, fetched %d urls in total",
                         self.root, len(self.urltable))
        self.timer.cancel()
        self._stop.set()
        self.pool.join()
        self.queue.put(StopIteration)
        return

    def dump(self):
        import StringIO
        out = StringIO.StringIO()
        for url in self.urltable:
            try:
                print >> out, url
            except:
                continue
        return out.getvalue()


class Handler(gevent.Greenlet):

    logger = logging.getLogger("spider.handler")

    def __init__(self, urlobj, spider):
        gevent.Greenlet.__init__(self)
        self.urlobj = urlobj
        self.spider = spider
        self.charset = "utf-8"

    def _run(self):
        strategy = self.spider.strategy
        urltable = self.spider.urltable
        queue = self.spider.queue

        try:
            html = self.open(self.urlobj.url)
        except Exception, why:
            self.logger.debug("open '%s' failed, since: %s", self.urlobj, why)
            return self.stop()

        linkin = self.urlobj
        depth = linkin.depth + 1

        if strategy.max_depth and (depth > strategy.max_depth):
            return self.stop()

        for link in self.feed(html):
            if urltable.full():
                self.stop()
                self.spider.stop()
                return

            if link in urltable:
                continue
            if strategy.same_host and (not UrlFilter.isSameHost(link, linkin.url)):
                continue
            if strategy.same_domain and (not UrlFilter.isSameDomain(link, linkin.url)):
                continue

            url = UrlObj(link, depth, linkin)
            urltable.insert(url)
            queue.put(url)

            self.logger.debug("successfully crawled '%s', <%d> urls so far",
                              url, len(urltable))

        self.stop()

    def open(self, url):
        strategy = self.spider.strategy
        try:
            resp = requests.get(url, headers=strategy.headers,
                                cookies=strategy.cookies,
                                timeout=strategy.timeout,
                                verify=strategy.ssl_verify)
        except requests.exceptions.RequestException, e:
            raise e
        if resp.status_code != requests.codes.ok:
            resp.raise_for_status()
        charset = HtmlAnalyzer.detectCharSet(resp.text)
        if charset is not None:
            self.charset = charset
            resp.encoding = charset
        return resp.text

    def feed(self, html):
        return HtmlAnalyzer.extractLinks(html, self.urlobj.url, self.charset)

    def stop(self):
        self.spider.greenlet_finished.set()
        self.kill(block=False)


class TestSpider(unittest.TestCase):

    def setUp(self):
        self.root = "http://www.sina.com.cn"
        strategy = Strategy(max_depth=3, max_count=5000,
                            same_host=False, same_domain=True)
        self.spider = Spider(strategy)
        self.spider.setRootUrl(self.root)
        self.spider.run()

    def testSpiderStrategy(self):
        self.assertEqual(len(self.spider.urltable), 5000)
        self.assertLessEqual(self.spider.urltable.urls[-1].depth, 3)
        for url in self.spider.urltable.urls[100:200]:
            self.assert_(UrlFilter.isSameDomain(self.root, str(url)))


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG if "-v" in sys.argv else logging.WARN,
        format='%(asctime)s %(levelname)s %(message)s')
    unittest.main()
xujun10110/Hammer
lib/spider/spider.py
Python
gpl-2.0
7,646
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import unittest

import bob


class BobTests(unittest.TestCase):
    def test_stating_something(self):
        self.assertEqual(
            'Whatever.',
            bob.hey('Tom-ay-to, tom-aaaah-to.')
        )

    def test_shouting(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('WATCH OUT!')
        )

    def test_asking_a_question(self):
        self.assertEqual(
            'Sure.',
            bob.hey('Does this cryogenic chamber make me look fat?')
        )

    def test_asking_a_numeric_question(self):
        self.assertEqual(
            'Sure.',
            bob.hey('You are, what, like 15?')
        )

    def test_talking_forcefully(self):
        self.assertEqual(
            'Whatever.',
            bob.hey("Let's go make out behind the gym!")
        )

    def test_using_acronyms_in_regular_speech(self):
        self.assertEqual(
            'Whatever.',
            bob.hey("It's OK if you don't want to go to the DMV.")
        )

    def test_forceful_questions(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('WHAT THE HELL WERE YOU THINKING?')
        )

    def test_shouting_numbers(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('1, 2, 3 GO!')
        )

    def test_only_numbers(self):
        self.assertEqual(
            'Whatever.',
            bob.hey('1, 2, 3')
        )

    def test_question_with_only_numbers(self):
        self.assertEqual(
            'Sure.',
            bob.hey('4?')
        )

    def test_shouting_with_special_characters(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('ZOMG THE %^*@#$(*^ ZOMBIES ARE COMING!!11!!1!')
        )

    def test_shouting_with_umlauts(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('ÜMLÄÜTS!')
        )

    def test_calmly_speaking_with_umlauts(self):
        self.assertEqual(
            'Whatever.',
            bob.hey('ÜMLäÜTS!')
        )

    def test_shouting_with_no_exclamation_mark(self):
        self.assertEqual(
            'Whoa, chill out!',
            bob.hey('I HATE YOU')
        )

    def test_statement_containing_question_mark(self):
        self.assertEqual(
            'Whatever.',
            bob.hey('Ending with ? means a question.')
        )

    def test_prattling_on(self):
        self.assertEqual(
            'Sure.',
            bob.hey("Wait! Hang on. Are you going to be OK?")
        )

    def test_silence(self):
        self.assertEqual(
            'Fine. Be that way!',
            bob.hey('')
        )

    def test_prolonged_silence(self):
        self.assertEqual(
            'Fine. Be that way!',
            bob.hey('   \t')
        )

    def test_starts_with_whitespace(self):
        self.assertEqual(
            'Whatever.',
            bob.hey('         hmmmmmmm...')
        )


if __name__ == '__main__':
    unittest.main()
vellonce/python-exercises
bob/bob_test.py
Python
gpl-2.0
2,904
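The tests above assume a bob module exposing a single hey() function; the sketch below is one implementation that satisfies those cases (my own, not the exercise's reference solution):

# bob.py sketch: rule order matters, shouting is checked before questions.
def hey(what):
    what = what.strip()
    if not what:
        return 'Fine. Be that way!'
    if what.isupper():        # shouting; str.isupper handles umlauts too
        return 'Whoa, chill out!'
    if what.endswith('?'):
        return 'Sure.'
    return 'Whatever.'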
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock

@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
import shutil

import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.modules import get_software_root


class EB_PSI(ConfigureMake):
    """
    Support for building and installing PSI
    """

    def __init__(self, *args, **kwargs):
        """Initialize class variables custom to PSI."""
        super(EB_PSI, self).__init__(*args, **kwargs)
        self.psi_srcdir = None
        self.install_psi_objdir = None
        self.install_psi_srcdir = None

    @staticmethod
    def extra_options():
        """Extra easyconfig parameters specific to PSI."""
        extra_vars = {
            # always include running PSI unit tests (takes about 2h or less)
            'runtest': ["tests TESTFLAGS='-u -q'",
                        "Run tests included with PSI, without interruption.", BUILD],
        }
        return ConfigureMake.extra_options(extra_vars)

    def configure_step(self):
        """
        Configure build outside of source directory.
        """
        try:
            objdir = os.path.join(self.builddir, 'obj')
            os.makedirs(objdir)
            os.chdir(objdir)
        except OSError, err:
            self.log.error("Failed to prepare for configuration of PSI build: %s" % err)

        if self.toolchain.options.get('usempi', None):
            # PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
            # we should always specify the sequential Fortran compiler,
            # to avoid problems with -lmpi vs -lmpi_mt during linking
            fcompvar = 'F77_SEQ'
        else:
            fcompvar = 'F77'

        # update configure options
        # using multi-threaded BLAS/LAPACK is important for performance,
        # cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
        opt_vars = [
            ('cc', 'CC'),
            ('cxx', 'CXX'),
            ('fc', fcompvar),
            ('libdirs', 'LDFLAGS'),
            ('blas', 'LIBBLAS_MT'),
            ('lapack', 'LIBLAPACK_MT'),
        ]
        for (opt, var) in opt_vars:
            self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))

        # -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
        # both define SEEK_SET, this makes the one for MPI be ignored
        self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))

        # explicitly specify Python binary to use
        pythonroot = get_software_root('Python')
        if not pythonroot:
            self.log.error("Python module not loaded.")
        env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))

        # specify location of Boost
        boostroot = get_software_root('Boost')
        if not boostroot:
            self.log.error("Boost module not loaded.")
        self.cfg.update('configopts', "--with-boost=%s" % boostroot)

        # enable support for plugins
        self.cfg.update('configopts', "--with-plugins")

        # In order to create new plugins with PSI, it needs to know the location of the source
        # and the obj dir after install. These env vars give that information to the configure script.
        self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
        self.install_psi_objdir = os.path.join(self.installdir, 'obj')
        self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
        env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
        env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)

        super(EB_PSI, self).configure_step(cmd_prefix=self.cfg['start_dir'])

    def install_step(self):
        """Custom install procedure for PSI."""
        super(EB_PSI, self).install_step()
        # the obj and unpacked sources must remain available for working with plugins
        try:
            for subdir in ['obj', self.psi_srcdir]:
                shutil.copytree(os.path.join(self.builddir, subdir),
                                os.path.join(self.installdir, subdir))
        except OSError, err:
            self.log.error("Failed to copy obj and unpacked sources to install dir: %s" % err)

    def sanity_check_step(self):
        """Custom sanity check for PSI."""
        custom_paths = {
            'files': ['bin/psi%s' % self.version.split('.')[0]],
            'dirs': ['include', 'share/psi'],
        }
        super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Custom variables for PSI module."""
        txt = super(EB_PSI, self).make_module_extra()
        txt += self.moduleGenerator.set_environment('PSI4DATADIR', '$root/share/psi')
        return txt
geimer/easybuild-easyblocks
easybuild/easyblocks/p/psi.py
Python
gpl-2.0
6,029
#!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: azure_rm_containerregistry_facts
version_added: "2.7"
short_description: Get Azure Container Registry facts.
description:
    - Get facts for Container Registry.

options:
    resource_group:
        description:
            - The name of the resource group to which the container registry belongs.
        required: True
    name:
        description:
            - The name of the container registry.
    retrieve_credentials:
        description:
            - Retrieve credentials for container registry.
        type: bool
        default: no
    tags:
        description:
            - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.

extends_documentation_fragment:
    - azure

author:
    - "Zim Kalinowski (@zikalino)"
'''

EXAMPLES = '''
  - name: Get instance of Registry
    azure_rm_containerregistry_facts:
      resource_group: sampleresourcegroup
      name: sampleregistry

  - name: List instances of Registry
    azure_rm_containerregistry_facts:
      resource_group: sampleresourcegroup
'''

RETURN = '''
registries:
    description: A list of dictionaries containing facts for registries.
    returned: always
    type: complex
    contains:
        id:
            description:
                - The resource ID.
            returned: always
            type: str
            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry"
        name:
            description:
                - The name of the resource.
            returned: always
            type: str
            sample: myRegistry
        location:
            description:
                - The location of the resource. This cannot be changed after the resource is created.
            returned: always
            type: str
            sample: westus
        admin_user_enabled:
            description:
                - Is admin user enabled.
            returned: always
            type: bool
            sample: yes
        sku:
            description:
                - The SKU name of the container registry.
            returned: always
            type: str
            sample: classic
        provisioning_state:
            description:
                - Provisioning state of the container registry.
            returned: always
            type: str
            sample: Succeeded
        login_server:
            description:
                - Login server for the registry.
            returned: always
            type: str
            sample: acrd08521b.azurecr.io
        credentials:
            description:
                - Credentials; fields will be empty if admin user is not enabled for ACR.
            returned: when C(retrieve_credentials) is set and C(admin_user_enabled) is set on ACR
            type: complex
            contains:
                username:
                    description:
                        - The user name for container registry.
                    returned: when registry exists and C(admin_user_enabled) is set
                    type: str
                    sample: zim
                password:
                    description:
                        - password value
                    returned: when registry exists and C(admin_user_enabled) is set
                    type: str
                    sample: pass1value
                password2:
                    description:
                        - password2 value
                    returned: when registry exists and C(admin_user_enabled) is set
                    type: str
                    sample: pass2value
        tags:
            description: Tags assigned to the resource. Dictionary of string:string pairs.
            type: dict
            sample: { "tag1": "abc" }
'''

from ansible.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
    from msrestazure.azure_operation import AzureOperationPoller
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMContainerRegistryFacts(AzureRMModuleBase):
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list'
            ),
            retrieve_credentials=dict(
                type='bool',
                default=False
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.name = None
        self.retrieve_credentials = False
        super(AzureRMContainerRegistryFacts, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name:
            self.results['registries'] = self.get()
        elif self.resource_group:
            self.results['registries'] = self.list_by_resource_group()
        else:
            self.results['registries'] = self.list_all()

        return self.results

    def get(self):
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group,
                                                                    registry_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Registries.')

        if response is not None:
            if self.has_tags(response.tags, self.tags):
                results.append(self.format_item(response))

        return results

    def list_all(self):
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.list()
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not get facts for Registries.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))
        return results

    def list_by_resource_group(self):
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not get facts for Registries.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))
        return results

    def format_item(self, item):
        d = item.as_dict()
        resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
        name = d['name']
        credentials = {}
        admin_user_enabled = d['admin_user_enabled']

        if self.retrieve_credentials and admin_user_enabled:
            credentials = self.containerregistry_client.registries.list_credentials(resource_group, name)

        d = {
            'resource_group': resource_group,
            'name': d['name'],
            'location': d['location'],
            'admin_user_enabled': admin_user_enabled,
            'sku': d['sku']['tier'].lower(),
            'provisioning_state': d['provisioning_state'],
            'login_server': d['login_server'],
            'id': d['id'],
            'tags': d.get('tags', None),
            'credentials': credentials
        }
        return d


def main():
    AzureRMContainerRegistryFacts()


if __name__ == '__main__':
    main()
maartenq/ansible
lib/ansible/modules/cloud/azure/azure_rm_containerregistry_facts.py
Python
gpl-3.0
8,582
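A minimal standalone sketch of the resource-ID parsing performed in format_item() above. The helper name and the sample ID are illustrative only; the parsing logic itself is taken directly from the module.

# Standalone sketch of the resource-group extraction used in format_item().
def resource_group_from_id(resource_id):
    """Return the resource group segment of an Azure resource ID."""
    return resource_id.split('resourceGroups/')[1].split('/')[0]

rid = ("/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
       "/resourceGroups/myResourceGroup"
       "/providers/Microsoft.ContainerRegistry/registries/myRegistry")
assert resource_group_from_id(rid) == 'myResourceGroup'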
###########################################################
#
#  Simple executor script for Batch class methods.
#
#  The script is concatenated on the fly with the required
#  batch system class definition.
#
#  15.11.2014
#  Author: A.T.
#
###########################################################

from __future__ import print_function

if __name__ == "__main__":

    import sys
    import json

    # urllib.quote/unquote moved to urllib.parse in Python 3
    try:
        from urllib import quote, unquote
    except ImportError:
        from urllib.parse import quote, unquote

    # The single argument is a URL-quoted JSON dictionary of parameters
    arguments = sys.argv[1]
    inputDict = json.loads(unquote(arguments))

    method = inputDict.pop('Method')
    batchSystem = inputDict.pop('BatchSystem')

    # The batch system class is defined in the code concatenated before this script
    batch = locals()[batchSystem]()

    try:
        result = getattr(batch, method)(**inputDict)
    except Exception as x:
        result = 'Exception: %s' % str(x)

    resultJson = quote(json.dumps(result))

    print("============= Start output ===============")
    print(resultJson)
petricm/DIRAC
Resources/Computing/BatchSystems/executeBatch.py
Python
gpl-3.0
848
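For context, a hedged sketch of how a caller might package the argument that the executor script above expects in sys.argv[1]. The Method and BatchSystem values here are illustrative placeholders, not a statement of the actual batch class API.

import json
try:
    from urllib import quote        # Python 2
except ImportError:
    from urllib.parse import quote  # Python 3

# Build the URL-quoted JSON payload that the executor script decodes.
payload = {'Method': 'submitJob', 'BatchSystem': 'SLURM', 'NJobs': 2}
encoded = quote(json.dumps(payload))
print(encoded)  # would be passed as sys.argv[1] to the concatenated script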
#!/usr/bin/env python
# encoding: utf-8

#################################################################
# This file is part of glyr
#   + a command-line tool and library to download various sort of music related metadata.
#   + Copyright (C) [2011-2012] [Christopher Pahl]
#   + Hosted at: https://github.com/sahib/glyr
#
# glyr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# glyr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with glyr. If not, see <http://www.gnu.org/licenses/>.
#################################################################

# Common lambdas and other test utils
len_greater_0 = lambda results: len(results) > 0
len_equal_0 = lambda results: len(results) == 0
emillon/glyr-debian
spec/provider/tests/__common__.py
Python
gpl-3.0
1,130
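A trivial usage sketch for the two predicates above; the result lists are hypothetical provider outputs.

len_greater_0 = lambda results: len(results) > 0
len_equal_0 = lambda results: len(results) == 0

assert len_greater_0(['cover_front.jpg'])  # a provider returned something
assert len_equal_0([])                     # a provider returned nothing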
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Core Zen Coding library. Contains various text manipulation functions: == Expand abbreviation Expands abbreviation like ul#nav>li*5>a into a XHTML string. === How to use First, you have to extract current string (where cursor is) from your test editor and use <code>find_abbr_in_line()</code> method to extract abbreviation. If abbreviation was found, this method will return it as well as position index of abbreviation inside current line. If abbreviation wasn't found, method returns empty string. With abbreviation found, you should call <code>parse_into_tree()</code> method to transform abbreviation into a tag tree. This method returns <code>Tag</code> object on success, None on failure. Then simply call <code>to_string()</code> method of returned <code>Tag</code> object to transoform tree into a XHTML string You can setup output profile using <code>setup_profile()</code> method (see <code>default_profile</code> definition for available options) Created on Apr 17, 2009 @author: Sergey Chikuyonok (http://chikuyonok.ru) ''' from zen_settings import zen_settings import re import stparser newline = '\n' "Newline symbol" caret_placeholder = '{%::zen-caret::%}' default_tag = 'div' re_tag = re.compile(r'<\/?[\w:\-]+(?:\s+[\w\-:]+(?:\s*=\s*(?:(?:"[^"]*")|(?:\'[^\']*\')|[^>\s]+))?)*\s*(\/?)>$') profiles = {} "Available output profiles" default_profile = { 'tag_case': 'lower', # values are 'lower', 'upper' 'attr_case': 'lower', # values are 'lower', 'upper' 'attr_quotes': 'double', # values are 'single', 'double' 'tag_nl': 'decide', # each tag on new line, values are True, False, 'decide' 'place_cursor': True, # place cursor char — | (pipe) — in output 'indent': True, # indent tags 'inline_break': 3, # how many inline elements should be to force line break (set to 0 to disable) 'self_closing_tag': 'xhtml' # use self-closing style for writing empty elements, e.g. <br /> or <br>. # values are True, False, 'xhtml' } basic_filters = 'html'; "Filters that will be applied for unknown syntax" max_tabstop = 0 "Maximum tabstop index for current session" def char_at(text, pos): """ Returns character at specified index of text. If index if out of range, returns empty string """ return text[pos] if pos < len(text) else '' def has_deep_key(obj, key): """ Check if <code>obj</code> dictionary contains deep key. For example, example, it will allow you to test existance of my_dict[key1][key2][key3], testing existance of my_dict[key1] first, then my_dict[key1][key2], and finally my_dict[key1][key2][key3] @param obj: Dictionary to test @param obj: dict @param key: Deep key to test. Can be list (like ['key1', 'key2', 'key3']) or string (like 'key1.key2.key3') @type key: list, tuple, str @return: bool """ if isinstance(key, str): key = key.split('.') last_obj = obj for v in key: if hasattr(last_obj, v): last_obj = getattr(last_obj, v) elif last_obj.has_key(v): last_obj = last_obj[v] else: return False return True def is_allowed_char(ch): """ Test if passed symbol is allowed in abbreviation @param ch: Symbol to test @type ch: str @return: bool """ return ch.isalnum() or ch in "#.>+*:$-_!@[]()|" def split_by_lines(text, remove_empty=False): """ Split text into lines. 
Set <code>remove_empty</code> to true to filter out empty lines @param text: str @param remove_empty: bool @return list """ lines = text.splitlines() return remove_empty and [line for line in lines if line.strip()] or lines def make_map(prop): """ Helper function that transforms string into dictionary for faster search @param prop: Key name in <code>zen_settings['html']</code> dictionary @type prop: str """ obj = {} for a in zen_settings['html'][prop].split(','): obj[a] = True zen_settings['html'][prop] = obj def create_profile(options): """ Create profile by adding default values for passed optoin set @param options: Profile options @type options: dict """ for k, v in default_profile.items(): options.setdefault(k, v) return options def setup_profile(name, options = {}): """ @param name: Profile name @type name: str @param options: Profile options @type options: dict """ profiles[name.lower()] = create_profile(options); def get_newline(): """ Returns newline symbol which is used in editor. This function must be redefined to return current editor's settings @return: str """ return newline def set_newline(char): """ Sets newline character used in Zen Coding """ global newline newline = char def string_to_hash(text): """ Helper function that transforms string into hash @return: dict """ obj = {} items = text.split(",") for i in items: obj[i] = True return obj def pad_string(text, pad): """ Indents string with space characters (whitespace or tab) @param text: Text to indent @type text: str @param pad: Indentation level (number) or indentation itself (string) @type pad: int, str @return: str """ pad_str = '' result = '' if isinstance(pad, basestring): pad_str = pad else: pad_str = get_indentation() * pad nl = get_newline() lines = split_by_lines(text) if lines: result += lines[0] for line in lines[1:]: result += nl + pad_str + line return result def is_snippet(abbr, doc_type = 'html'): """ Check is passed abbreviation is a snippet @return bool """ return get_snippet(doc_type, abbr) and True or False def is_ends_with_tag(text): """ Test is string ends with XHTML tag. This function used for testing if '<' symbol belogs to tag or abbreviation @type text: str @return: bool """ return re_tag.search(text) != None def get_elements_collection(resource, type): """ Returns specified elements collection (like 'empty', 'block_level') from <code>resource</code>. If collections wasn't found, returns empty object @type resource: dict @type type: str @return: dict """ if 'element_types' in resource and type in resource['element_types']: return resource['element_types'][type] else: return {} def replace_variables(text): """ Replace variables like ${var} in string @param text: str @return: str """ return re.sub(r'\$\{([\w\-]+)\}', lambda m: get_variable(m.group(1)) or m.group(0), text) def get_abbreviation(res_type, abbr): """ Returns abbreviation value from data set @param res_type: Resource type (html, css, ...) @type res_type: str @param abbr: Abbreviation name @type abbr: str @return dict, None """ return get_settings_resource(res_type, abbr, 'abbreviations') def get_snippet(res_type, snippet_name): """ Returns snippet value from data set @param res_type: Resource type (html, css, ...) 
@type res_type: str @param snippet_name: Snippet name @type snippet_name: str @return dict, None """ return get_settings_resource(res_type, snippet_name, 'snippets'); def get_variable(name): """ Returns variable value @return: str """ if name in zen_settings['variables']: return zen_settings['variables'][name] return None def set_variable(name, value): """ Set variable value """ zen_settings['variables'][name] = value def get_indentation(): """ Returns indentation string @return {String} """ return get_variable('indentation'); def create_resource_chain(syntax, name): """ Creates resource inheritance chain for lookups @param syntax: Syntax name @type syntax: str @param name: Resource name @type name: str @return: list """ result = [] if syntax in zen_settings: resource = zen_settings[syntax] if name in resource: result.append(resource[name]) if 'extends' in resource: # find resource in ancestors for type in resource['extends']: if has_deep_key(zen_settings, [type, name]): result.append(zen_settings[type][name]) return result def get_resource(syntax, name): """ Get resource collection from settings file for specified syntax. It follows inheritance chain if resource wasn't directly found in syntax settings @param syntax: Syntax name @type syntax: str @param name: Resource name @type name: str """ chain = create_resource_chain(syntax, name) return chain[0] if chain else None def get_settings_resource(syntax, abbr, name): """ Returns resurce value from data set with respect of inheritance @param syntax: Resource syntax (html, css, ...) @type syntax: str @param abbr: Abbreviation name @type abbr: str @param name: Resource name ('snippets' or 'abbreviation') @type name: str @return dict, None """ for item in create_resource_chain(syntax, name): if abbr in item: return item[abbr] return None def get_word(ix, text): """ Get word, starting at <code>ix</code> character of <code>text</code> @param ix: int @param text: str """ m = re.match(r'^[\w\-:\$]+', text[ix:]) return m.group(0) if m else '' def extract_attributes(attr_set): """ Extract attributes and their values from attribute set @param attr_set: str """ attr_set = attr_set.strip() loop_count = 100 # endless loop protection re_string = r'^(["\'])((?:(?!\1)[^\\]|\\.)*)\1' result = [] while attr_set and loop_count: loop_count -= 1 attr_name = get_word(0, attr_set) attr = None if attr_name: attr = {'name': attr_name, 'value': ''} # let's see if attribute has value ch = attr_set[len(attr_name)] if len(attr_set) > len(attr_name) else '' if ch == '=': ch2 = attr_set[len(attr_name) + 1] if ch2 in '"\'': # we have a quoted string m = re.match(re_string, attr_set[len(attr_name) + 1:]) if m: attr['value'] = m.group(2) attr_set = attr_set[len(attr_name) + len(m.group(0)) + 1:].strip() else: # something wrong, break loop attr_set = '' else: # unquoted string m = re.match(r'^(.+?)(\s|$)', attr_set[len(attr_name) + 1:]) if m: attr['value'] = m.group(1) attr_set = attr_set[len(attr_name) + len(m.group(1)) + 1:].strip() else: # something wrong, break loop attr_set = '' else: attr_set = attr_set[len(attr_name):].strip() else: # something wrong, can't extract attribute name break if attr: result.append(attr) return result def parse_attributes(text): """ Parses tag attributes extracted from abbreviation """ # Example of incoming data: # #header # .some.data # .some.data#header # [attr] # #item[attr=Hello other="World"].class result = [] class_name = None char_map = {'#': 'id', '.': 'class'} # walk char-by-char i = 0 il = len(text) while i < il: ch = text[i] if ch 
== '#': # id val = get_word(i, text[1:]) result.append({'name': char_map[ch], 'value': val}) i += len(val) + 1 elif ch == '.': #class val = get_word(i, text[1:]) if not class_name: # remember object pointer for value modification class_name = {'name': char_map[ch], 'value': ''} result.append(class_name) if class_name['value']: class_name['value'] += ' ' + val else: class_name['value'] = val i += len(val) + 1 elif ch == '[': # begin attribute set # search for end of set end_ix = text.find(']', i) if end_ix == -1: # invalid attribute set, stop searching i = len(text) else: result.extend(extract_attributes(text[i + 1:end_ix])) i = end_ix else: i += 1 return result class AbbrGroup(object): """ Abreviation's group element """ def __init__(self, parent=None): """ @param parent: Parent group item element @type parent: AbbrGroup """ self.expr = '' self.parent = parent self.children = [] def add_child(self): child = AbbrGroup(self) self.children.append(child) return child def clean_up(self): for item in self.children: expr = item.expr if not expr: self.children.remove(item) else: # remove operators at the and of expression item.clean_up() def split_by_groups(abbr): """ Split abbreviation by groups @type abbr: str @return: AbbrGroup """ root = AbbrGroup() last_parent = root cur_item = root.add_child() stack = [] i = 0 il = len(abbr) while i < il: ch = abbr[i] if ch == '(': # found new group operator = i and abbr[i - 1] or '' if operator == '>': stack.append(cur_item) last_parent = cur_item else: stack.append(last_parent) cur_item = None elif ch == ')': last_parent = stack.pop() cur_item = None next_char = char_at(abbr, i + 1) if next_char == '+' or next_char == '>': # next char is group operator, skip it i += 1 else: if ch == '+' or ch == '>': # skip operator if it's followed by parenthesis next_char = char_at(abbr, i + 1) if next_char == '(': i += 1 continue if not cur_item: cur_item = last_parent.add_child() cur_item.expr += ch i += 1 root.clean_up() return root def rollout_tree(tree, parent=None): """ Roll outs basic Zen Coding tree into simplified, DOM-like tree. The simplified tree, for example, represents each multiplied element as a separate element sets with its own content, if exists. The simplified tree element contains some meta info (tag name, attributes, etc.) as well as output strings, which are exactly what will be outputted after expanding abbreviation. This tree is used for <i>filtering</i>: you can apply filters that will alter output strings to get desired look of expanded abbreviation. 
@type tree: Tag @param parent: ZenNode """ if not parent: parent = ZenNode(tree) how_many = 1 tag_content = '' for child in tree.children: how_many = child.count if child.repeat_by_lines: # it's a repeating element tag_content = split_by_lines(child.get_content(), True) how_many = max(len(tag_content), 1) else: tag_content = child.get_content() for j in range(how_many): tag = ZenNode(child) parent.add_child(tag) tag.counter = j + 1 if child.children: rollout_tree(child, tag) add_point = tag.find_deepest_child() or tag if tag_content: if isinstance(tag_content, basestring): add_point.content = tag_content else: add_point.content = tag_content[j] or '' return parent def run_filters(tree, profile, filter_list): """ Runs filters on tree @type tree: ZenNode @param profile: str, object @param filter_list: str, list @return: ZenNode """ import filters if isinstance(profile, basestring) and profile in profiles: profile = profiles[profile]; if not profile: profile = profiles['plain'] if isinstance(filter_list, basestring): filter_list = re.split(r'[\|,]', filter_list) for name in filter_list: name = name.strip() if name and name in filters.filter_map: tree = filters.filter_map[name](tree, profile) return tree def abbr_to_primary_tree(abbr, doc_type='html'): """ Transforms abbreviation into a primary internal tree. This tree should'n be used ouside of this scope @param abbr: Abbreviation to transform @type abbr: str @param doc_type: Document type (xsl, html), a key of dictionary where to search abbreviation settings @type doc_type: str @return: Tag """ root = Tag('', 1, doc_type) token = re.compile(r'([\+>])?([a-z@\!\#\.][\w:\-]*)((?:(?:[#\.][\w\-\$]+)|(?:\[[^\]]+\]))+)?(\*(\d*))?(\+$)?', re.IGNORECASE) if not abbr: return None def expando_replace(m): ex = m.group(0) a = get_abbreviation(doc_type, ex) return a and a.value or ex def token_expander(operator, tag_name, attrs, has_multiplier, multiplier, has_expando): multiply_by_lines = (has_multiplier and not multiplier) multiplier = multiplier and int(multiplier) or 1 tag_ch = tag_name[0] if tag_ch == '#' or tag_ch == '.': if attrs: attrs = tag_name + attrs else: attrs = tag_name tag_name = default_tag if has_expando: tag_name += '+' current = is_snippet(tag_name, doc_type) and Snippet(tag_name, multiplier, doc_type) or Tag(tag_name, multiplier, doc_type) if attrs: attrs = parse_attributes(attrs) for attr in attrs: current.add_attribute(attr['name'], attr['value']) # dive into tree if operator == '>' and token_expander.last: token_expander.parent = token_expander.last; token_expander.parent.add_child(current) token_expander.last = current if multiply_by_lines: root.multiply_elem = current return '' # replace expandos abbr = re.sub(r'([a-z][a-z0-9]*)\+$', expando_replace, abbr) token_expander.parent = root token_expander.last = None # abbr = re.sub(token, lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6), m.group(7)), abbr) # Issue from Einar Egilsson abbr = token.sub(lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6)), abbr) root.last = token_expander.last # empty 'abbr' variable means that abbreviation was expanded successfully, # non-empty variable means there was a syntax error return not abbr and root or None; def expand_group(group, doc_type, parent): """ Expand single group item @param group: AbbrGroup @param doc_type: str @param parent: Tag """ tree = abbr_to_primary_tree(group.expr, doc_type) last_item = None if tree: for item in tree.children: 
last_item = item parent.add_child(last_item) else: raise Exception('InvalidGroup') # set repeating element to the topmost node root = parent while root.parent: root = root.parent root.last = tree.last if tree.multiply_elem: root.multiply_elem = tree.multiply_elem # process child groups if group.children: add_point = last_item.find_deepest_child() or last_item for child in group.children: expand_group(child, doc_type, add_point) def replace_unescaped_symbol(text, symbol, replace): """ Replaces unescaped symbols in <code>text</code>. For example, the '$' symbol will be replaced in 'item$count', but not in 'item\$count'. @param text: Original string @type text: str @param symbol: Symbol to replace @type symbol: st @param replace: Symbol replacement @type replace: str, function @return: str """ i = 0 il = len(text) sl = len(symbol) match_count = 0 while i < il: if text[i] == '\\': # escaped symbol, skip next character i += sl + 1 elif text[i:i + sl] == symbol: # have match cur_sl = sl match_count += 1 new_value = replace if callable(new_value): replace_data = replace(text, symbol, i, match_count) if replace_data: cur_sl = len(replace_data[0]) new_value = replace_data[1] else: new_value = False if new_value is False: # skip replacement i += 1 continue text = text[0:i] + new_value + text[i + cur_sl:] # adjust indexes il = len(text) i += len(new_value) else: i += 1 return text def run_action(name, *args, **kwargs): """ Runs Zen Coding action. For list of available actions and their arguments see zen_actions.py file. @param name: Action name @type name: str @param args: Additional arguments. It may be array of arguments or inline arguments. The first argument should be <code>zen_editor</code> instance @type args: list @example zen_coding.run_actions('expand_abbreviation', zen_editor) zen_coding.run_actions('wrap_with_abbreviation', zen_editor, 'div') """ import zen_actions try: if hasattr(zen_actions, name): return getattr(zen_actions, name)(*args, **kwargs) except: return False def expand_abbreviation(abbr, syntax='html', profile_name='plain'): """ Expands abbreviation into a XHTML tag string @type abbr: str @return: str """ tree_root = parse_into_tree(abbr, syntax); if tree_root: tree = rollout_tree(tree_root) apply_filters(tree, syntax, profile_name, tree_root.filters) return replace_variables(tree.to_string()) return '' def extract_abbreviation(text): """ Extracts abbreviations from text stream, starting from the end @type text: str @return: Abbreviation or empty string """ cur_offset = len(text) start_index = -1 brace_count = 0 while True: cur_offset -= 1 if cur_offset < 0: # moved at string start start_index = 0 break ch = text[cur_offset] if ch == ']': brace_count += 1 elif ch == '[': brace_count -= 1 else: if brace_count: # respect all characters inside attribute sets continue if not is_allowed_char(ch) or (ch == '>' and is_ends_with_tag(text[0:cur_offset + 1])): # found stop symbol start_index = cur_offset + 1 break return text[start_index:] if start_index != -1 else '' def parse_into_tree(abbr, doc_type='html'): """ Parses abbreviation into a node set @param abbr: Abbreviation to transform @type abbr: str @param doc_type: Document type (xsl, html), a key of dictionary where to search abbreviation settings @type doc_type: str @return: Tag """ # remove filters from abbreviation filter_list = [] def filter_replace(m): filter_list.append(m.group(1)) return '' re_filter = re.compile(r'\|([\w\|\-]+)$') abbr = re_filter.sub(filter_replace, abbr) # split abbreviation by groups group_root = 
split_by_groups(abbr) tree_root = Tag('', 1, doc_type) # then recursively expand each group item try: for item in group_root.children: expand_group(item, doc_type, tree_root) except: # there's invalid group, stop parsing return None tree_root.filters = ''.join(filter_list) return tree_root def is_inside_tag(html, cursor_pos): re_tag = re.compile(r'^<\/?\w[\w\:\-]*.*?>') # search left to find opening brace pos = cursor_pos while pos > -1: if html[pos] == '<': break pos -= 1 if pos != -1: m = re_tag.match(html[pos:]); if m and cursor_pos > pos and cursor_pos < pos + len(m.group(0)): return True return False def wrap_with_abbreviation(abbr, text, doc_type='html', profile='plain'): """ Wraps passed text with abbreviation. Text will be placed inside last expanded element @param abbr: Abbreviation @type abbr: str @param text: Text to wrap @type text: str @param doc_type: Document type (html, xml, etc.) @type doc_type: str @param profile: Output profile's name. @type profile: str @return {String} """ tree_root = parse_into_tree(abbr, doc_type) if tree_root: repeat_elem = tree_root.multiply_elem or tree_root.last repeat_elem.set_content(text) repeat_elem.repeat_by_lines = bool(tree_root.multiply_elem) tree = rollout_tree(tree_root) apply_filters(tree, doc_type, profile, tree_root.filters); return replace_variables(tree.to_string()) return None def get_caret_placeholder(): """ Returns caret placeholder @return: str """ if callable(caret_placeholder): return caret_placeholder() else: return caret_placeholder def set_caret_placeholder(value): """ Set caret placeholder: a string (like '|') or function. You may use a function as a placeholder generator. For example, TextMate uses ${0}, ${1}, ..., ${n} natively for quick Tab-switching between them. @param {String|Function} """ global caret_placeholder caret_placeholder = value def apply_filters(tree, syntax, profile, additional_filters=None): """ Applies filters to tree according to syntax @param tree: Tag tree to apply filters to @type tree: ZenNode @param syntax: Syntax name ('html', 'css', etc.) 
@type syntax: str @param profile: Profile or profile's name @type profile: str, object @param additional_filters: List or pipe-separated string of additional filters to apply @type additional_filters: str, list @return: ZenNode """ _filters = get_resource(syntax, 'filters') or basic_filters if additional_filters: _filters += '|' if isinstance(additional_filters, basestring): _filters += additional_filters else: _filters += '|'.join(additional_filters) if not _filters: # looks like unknown syntax, apply basic filters _filters = basic_filters return run_filters(tree, profile, _filters) def replace_counter(text, value): """ Replaces '$' character in string assuming it might be escaped with '\' @type text: str @type value: str, int @return: str """ symbol = '$' value = str(value) def replace_func(tx, symbol, pos, match_num): if char_at(tx, pos + 1) == '{' or char_at(tx, pos + 1).isdigit(): # it's a variable, skip it return False # replace sequense of $ symbols with padded number j = pos + 1 if j < len(text): while tx[j] == '$' and char_at(tx, j + 1) != '{': j += 1 return (tx[pos:j], value.zfill(j - pos)) return replace_unescaped_symbol(text, symbol, replace_func) def upgrade_tabstops(node): """ Upgrades tabstops in zen node in order to prevent naming conflicts @type node: ZenNode @param offset: Tab index offset @type offset: int @returns Maximum tabstop index in element """ max_num = [0] props = ('start', 'end', 'content') def _replace(m): num = int(m.group(1) or m.group(2)) if num > max_num[0]: max_num[0] = num return re.sub(r'\d+', str(num + max_tabstop), m.group(0), 1) for prop in props: node.__setattr__(prop, re.sub(r'\$(\d+)|\$\{(\d+):[^\}]+\}', _replace, node.__getattribute__(prop))) globals()['max_tabstop'] += max_num[0] return max_num[0] def unescape_text(text): """ Unescapes special characters used in Zen Coding, like '$', '|', etc. @type text: str @return: str """ return re.sub(r'\\(.)', r'\1', text) def get_profile(name): """ Get profile by it's name. If profile wasn't found, returns 'plain' profile """ return profiles[name] if name in profiles else profiles['plain'] def update_settings(settings): globals()['zen_settings'] = settings class Tag(object): def __init__(self, name, count=1, doc_type='html'): """ @param name: Tag name @type name: str @param count: How many times this tag must be outputted @type count: int @param doc_type: Document type (xsl, html) @type doc_type: str """ name = name.lower() abbr = get_abbreviation(doc_type, name) if abbr and abbr.type == stparser.TYPE_REFERENCE: abbr = get_abbreviation(doc_type, abbr.value) self.name = abbr and abbr.value['name'] or name.replace('+', '') self.count = count self.children = [] self.attributes = [] self.multiply_elem = None self.__attr_hash = {} self._abbr = abbr self.__content = '' self.repeat_by_lines = False self._res = zen_settings.has_key(doc_type) and zen_settings[doc_type] or {} self.parent = None # add default attributes if self._abbr and 'attributes' in self._abbr.value: for a in self._abbr.value['attributes']: self.add_attribute(a['name'], a['value']) def add_child(self, tag): """ Add new child @type tag: Tag """ tag.parent = self self.children.append(tag) def add_attribute(self, name, value): """ Add attribute to tag. 
If the attribute with the same name already exists, it will be overwritten, but if it's name is 'class', it will be merged with the existed one @param name: Attribute nama @type name: str @param value: Attribute value @type value: str """ # the only place in Tag where pipe (caret) character may exist # is the attribute: escape it with internal placeholder value = replace_unescaped_symbol(value, '|', get_caret_placeholder()); if name in self.__attr_hash: # attribue already exists a = self.__attr_hash[name] if name == 'class': # 'class' is a magic attribute if a['value']: value = ' ' + value a['value'] += value else: a['value'] = value else: a = {'name': name, 'value': value} self.__attr_hash[name] = a self.attributes.append(a) def has_tags_in_content(self): """ This function tests if current tags' content contains XHTML tags. This function is mostly used for output formatting """ return self.get_content() and re_tag.search(self.get_content()) def get_content(self): return self.__content def set_content(self, value): self.__content = value def set_content(self, content): #@DuplicatedSignature self.__content = content def get_content(self): #@DuplicatedSignature return self.__content def find_deepest_child(self): """ Search for deepest and latest child of current element. Returns None if there's no children @return Tag or None """ if not self.children: return None deepest_child = self while True: deepest_child = deepest_child.children[-1] if not deepest_child.children: break return deepest_child class Snippet(Tag): def __init__(self, name, count=1, doc_type='html'): super(Snippet, self).__init__(name, count, doc_type) self.value = replace_unescaped_symbol(get_snippet(doc_type, name), '|', get_caret_placeholder()) self.attributes = {'id': get_caret_placeholder(), 'class': get_caret_placeholder()} self._res = zen_settings[doc_type] def is_block(self): return True class ZenNode(object): """ Creates simplified tag from Zen Coding tag """ def __init__(self, tag): """ @type tag: Tag """ self.type = 'snippet' if isinstance(tag, Snippet) else 'tag' self.name = tag.name self.attributes = tag.attributes self.children = []; self.counter = 1 self.source = tag "Source element from which current tag was created" # relations self.parent = None self.next_sibling = None self.previous_sibling = None # output params self.start = '' self.end = '' self.content = '' self.padding = '' def add_child(self, tag): """ @type tag: ZenNode """ tag.parent = self if self.children: last_child = self.children[-1] tag.previous_sibling = last_child last_child.next_sibling = tag self.children.append(tag) def get_attribute(self, name): """ Get attribute's value. @type name: str @return: None if attribute wasn't found """ name = name.lower() for attr in self.attributes: if attr['name'].lower() == name: return attr['value'] return None def is_unary(self): """ Test if current tag is unary (no closing tag) @return: bool """ if self.type == 'snippet': return False return (self.source._abbr and self.source._abbr.value['is_empty']) or (self.name in get_elements_collection(self.source._res, 'empty')) def is_inline(self): """ Test if current tag is inline-level (like <strong>, <img>) @return: bool """ return self.name in get_elements_collection(self.source._res, 'inline_level') def is_block(self): """ Test if current element is block-level @return: bool """ return self.type == 'snippet' or not self.is_inline() def has_tags_in_content(self): """ This function tests if current tags' content contains xHTML tags. 
This function is mostly used for output formatting """ return self.content and re_tag.search(self.content) def has_children(self): """ Check if tag has child elements @return: bool """ return bool(self.children) def has_block_children(self): """ Test if current tag contains block-level children @return: bool """ if self.has_tags_in_content() and self.is_block(): return True for item in self.children: if item.is_block(): return True return False def find_deepest_child(self): """ Search for deepest and latest child of current element Returns None if there's no children @return: ZenNode|None """ if not self.children: return None deepest_child = self while True: deepest_child = deepest_child.children[-1] if not deepest_child.children: break return deepest_child def to_string(self): "@return {String}" content = ''.join([item.to_string() for item in self.children]) return self.start + self.content + content + self.end # create default profiles setup_profile('xhtml'); setup_profile('html', {'self_closing_tag': False}); setup_profile('xml', {'self_closing_tag': True, 'tag_nl': True}); setup_profile('plain', {'tag_nl': False, 'indent': False, 'place_cursor': False}); # This method call explicity loads default settings from zen_settings.py on start up # Comment this line if you want to load data from other resources (like editor's # native snippet) update_settings(stparser.get_settings())
Bruno-sm/fguess
training_files/Python/zen_core.py
Python
gpl-3.0
32,262
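A usage sketch for the module above, assuming zen_core.py and its zen_settings/stparser/filters dependencies are importable as laid out in the source tree. The abbreviation and the setup_profile()/expand_abbreviation() calls come from the module's own docstrings and definitions.

import zen_core

# Expand the docstring's example abbreviation into XHTML with the default profile.
html = zen_core.expand_abbreviation('ul#nav>li*5>a', 'html', 'plain')
print(html)

# Profiles control output details such as attribute quoting and self-closing style.
zen_core.setup_profile('my_profile', {'attr_quotes': 'single'})
print(zen_core.expand_abbreviation('img', 'html', 'my_profile'))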
# Copyright: (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import bisect import json import pkgutil import re from ansible import constants as C from ansible.module_utils._text import to_native, to_text from ansible.module_utils.distro import LinuxDistribution from ansible.utils.display import Display from ansible.utils.plugin_docs import get_versioned_doclink from ansible.module_utils.compat.version import LooseVersion from traceback import format_exc display = Display() foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND') class InterpreterDiscoveryRequiredError(Exception): def __init__(self, message, interpreter_name, discovery_mode): super(InterpreterDiscoveryRequiredError, self).__init__(message) self.interpreter_name = interpreter_name self.discovery_mode = discovery_mode def __str__(self): return self.message def __repr__(self): # TODO: proper repr impl return self.message def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): # interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to # get the system type from uname, and find any random Python that can get us the info we need. For supported # target OS types, we'll dispatch a Python script that calls plaform.dist() (for older platforms, where available) # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails. # FUTURE: add logical equivalence for "python3" in the case of py3-only modules? if interpreter_name != 'python': raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name)) host = task_vars.get('inventory_hostname', 'unknown') res = None platform_type = 'unknown' found_interpreters = [u'/usr/bin/python'] # fallback value is_auto_legacy = discovery_mode.startswith('auto_legacy') is_silent = discovery_mode.endswith('_silent') try: platform_python_map = C.config.get_config_value('INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars) bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars) display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host) # not all command -v impls accept a list of commands, so we have to call it once per python command_list = ["command -v '%s'" % py for py in bootstrap_python_list] shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list)) # FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do? 
res = action._low_level_execute_command(shell_bootstrap, sudoable=False) raw_stdout = res.get('stdout', u'') match = foundre.match(raw_stdout) if not match: display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host) raise ValueError('unexpected output from Python interpreter discovery') platform_type = match.groups()[0].lower().strip() found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')] display.debug(u"found interpreters: {0}".format(found_interpreters), host=host) if not found_interpreters: if not is_silent: action._discovery_warnings.append(u'No python interpreters found for ' u'host {0} (tried {1})'.format(host, bootstrap_python_list)) # this is lame, but returning None or throwing an exception is uglier return u'/usr/bin/python' if platform_type != 'linux': raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type))) platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py') # FUTURE: respect pipelining setting instead of just if the connection supports it? if action._connection.has_pipelining: res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script) else: # FUTURE: implement on-disk case (via script action or ?) raise NotImplementedError('pipelining support required for extended interpreter discovery') platform_info = json.loads(res.get('stdout')) distro, version = _get_linux_distro(platform_info) if not distro or not version: raise NotImplementedError('unable to get Linux distribution/version info') version_map = platform_python_map.get(distro.lower().strip()) if not version_map: raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro)) platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict') # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been) if is_auto_legacy: if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters: # FIXME: support comments in sivel's deprecation scanner so we can get reminded on this if not is_silent: action._discovery_deprecation_warnings.append(dict( msg=u"Distribution {0} {1} on host {2} should use {3}, but is using " u"/usr/bin/python for backward compatibility with prior Ansible releases. " u"A future Ansible release will default to using the discovered platform " u"python for this host. See {4} for more information" .format(distro, version, host, platform_interpreter, get_versioned_doclink('reference_appendices/interpreter_discovery.html')), version='2.12')) return u'/usr/bin/python' if platform_interpreter not in found_interpreters: if platform_interpreter not in bootstrap_python_list: # sanity check to make sure we looked for it if not is_silent: action._discovery_warnings \ .append(u"Platform interpreter {0} on host {1} is missing from bootstrap list" .format(platform_interpreter, host)) if not is_silent: action._discovery_warnings \ .append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the " u"discovered platform python interpreter was not present. See {5} " u"for more information." 
.format(distro, version, host, platform_interpreter, found_interpreters[0], get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) return found_interpreters[0] return platform_interpreter except NotImplementedError as ex: display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host) except Exception as ex: if not is_silent: display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex))) display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host) if res and res.get('stderr'): display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host) if not is_silent: action._discovery_warnings \ .append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of " u"another Python interpreter could change the meaning of that path. See {3} " u"for more information." .format(platform_type, host, found_interpreters[0], get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) return found_interpreters[0] def _get_linux_distro(platform_info): dist_result = platform_info.get('platform_dist_result', []) if len(dist_result) == 3 and any(dist_result): return dist_result[0], dist_result[1] osrelease_content = platform_info.get('osrelease_content') if not osrelease_content: return u'', u'' osr = LinuxDistribution._parse_os_release_content(osrelease_content) return osr.get('id', u''), osr.get('version_id', u'') def _version_fuzzy_match(version, version_map): # try exact match first res = version_map.get(version) if res: return res sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()]) find_looseversion = LooseVersion(version) # slot match; return nearest previous version we're newer than kpos = bisect.bisect(sorted_looseversions, find_looseversion) if kpos == 0: # older than everything in the list, return the oldest version # TODO: warning-worthy? return version_map.get(sorted_looseversions[0].vstring) # TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)? # return the next-oldest entry that we're newer than... return version_map.get(sorted_looseversions[kpos - 1].vstring)
maxamillion/ansible
lib/ansible/executor/interpreter_discovery.py
Python
gpl-3.0
9,978
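A standalone sketch of the fuzzy version-matching strategy used by _version_fuzzy_match() above: exact match first, then the nearest previous version via bisect over LooseVersion keys. The version_map below is an illustrative stand-in for the real INTERPRETER_PYTHON_DISTRO_MAP entries.

import bisect
try:
    from ansible.module_utils.compat.version import LooseVersion  # as in the source above
except ImportError:
    from distutils.version import LooseVersion  # fallback for a standalone run (pre-3.12)

version_map = {'6': u'/usr/bin/python', '8': u'/usr/libexec/platform-python'}

def fuzzy_match(version, version_map):
    if version in version_map:                 # exact match first
        return version_map[version]
    keys = sorted(LooseVersion(v) for v in version_map)
    pos = bisect.bisect(keys, LooseVersion(version))
    if pos == 0:                               # older than everything in the map
        return version_map[keys[0].vstring]
    return version_map[keys[pos - 1].vstring]  # nearest previous version

assert fuzzy_match('7.5', version_map) == u'/usr/bin/python'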
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import fnmatch import os import re import itertools from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError from ansible.inventory.data import InventoryData from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes, to_text from ansible.parsing.utils.addresses import parse_address from ansible.plugins.loader import inventory_loader from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"] IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS] IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS] IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS)) def order_patterns(patterns): ''' takes a list of patterns and reorders them by modifier to apply them consistently ''' # FIXME: this goes away if we apply patterns incrementally or by groups pattern_regular = [] pattern_intersection = [] pattern_exclude = [] for p in patterns: if p.startswith("!"): pattern_exclude.append(p) elif p.startswith("&"): pattern_intersection.append(p) elif p: pattern_regular.append(p) # if no regular pattern was given, hence only exclude and/or intersection # make that magically work if pattern_regular == []: pattern_regular = ['all'] # when applying the host selectors, run those without the "&" or "!" # first, then the &s, then the !s. return pattern_regular + pattern_intersection + pattern_exclude def split_host_pattern(pattern): """ Takes a string containing host patterns separated by commas (or a list thereof) and returns a list of single patterns (which may not contain commas). Whitespace is ignored. Also accepts ':' as a separator for backwards compatibility, but it is not recommended due to the conflict with IPv6 addresses and host ranges. Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd'] """ if isinstance(pattern, list): return list(itertools.chain(*map(split_host_pattern, pattern))) elif not isinstance(pattern, string_types): pattern = to_text(pattern, errors='surrogate_or_strict') # If it's got commas in it, we'll treat it as a straightforward # comma-separated list of patterns. if u',' in pattern: patterns = pattern.split(u',') # If it doesn't, it could still be a single pattern. This accounts for # non-separator uses of colons: IPv6 addresses and [x:y] host ranges. 
else: try: (base, port) = parse_address(pattern, allow_ranges=True) patterns = [pattern] except Exception: # The only other case we accept is a ':'-separated list of patterns. # This mishandles IPv6 addresses, and is retained only for backwards # compatibility. patterns = re.findall( to_text(r'''(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' | # ...or... \[[^\]]*\] # a single complete bracketed expression) )+ # occurring once or more '''), pattern, re.X ) return [p.strip() for p in patterns] class InventoryManager(object): ''' Creates and manages inventory ''' def __init__(self, loader, sources=None): # base objects self._loader = loader self._inventory = InventoryData() # a list of host(names) to contain current inquiries to self._restriction = None self._subset = None # caches self._hosts_patterns_cache = {} # resolved full patterns self._pattern_cache = {} # resolved individual patterns self._inventory_plugins = [] # for generating inventory # the inventory dirs, files, script paths or lists of hosts if sources is None: self._sources = [] elif isinstance(sources, string_types): self._sources = [sources] else: self._sources = sources # get to work! self.parse_sources(cache=True) @property def localhost(self): return self._inventory.localhost @property def groups(self): return self._inventory.groups @property def hosts(self): return self._inventory.hosts def get_vars(self, *args, **kwargs): return self._inventory.get_vars(args, kwargs) def add_host(self, host, group=None, port=None): return self._inventory.add_host(host, group, port) def add_group(self, group): return self._inventory.add_group(group) def get_groups_dict(self): return self._inventory.get_groups_dict() def reconcile_inventory(self): self.clear_caches() return self._inventory.reconcile_inventory() def get_host(self, hostname): return self._inventory.get_host(hostname) def _setup_inventory_plugins(self): ''' sets up loaded inventory plugins for usage ''' display.vvvv('setting up inventory plugins') for name in C.INVENTORY_ENABLED: plugin = inventory_loader.get(name) if plugin: self._inventory_plugins.append(plugin) else: display.warning('Failed to load inventory plugin, skipping %s' % name) if not self._inventory_plugins: raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.") def parse_sources(self, cache=False): ''' iterate over inventory sources and parse each one to populate it''' self._setup_inventory_plugins() parsed = False # allow for multiple inventory parsing for source in self._sources: if source: if ',' not in source: source = unfrackpath(source, follow=False) parse = self.parse_source(source, cache=cache) if parse and not parsed: parsed = True if parsed: # do post processing self._inventory.reconcile_inventory() else: if C.INVENTORY_UNPARSED_IS_FAILED: raise AnsibleError("No inventory was parsed, please check your configuration and options.") else: display.warning("No inventory was parsed, only implicit localhost is available") self._inventory_plugins = [] def parse_source(self, source, cache=False): ''' Generate or update inventory for the source provided ''' parsed = False display.debug(u'Examining possible inventory source: %s' % source) # use binary for path functions b_source = to_bytes(source) # process directories as a collection of inventories if os.path.isdir(b_source): display.debug(u'Searching for inventory files in directory: %s' % source) for i in sorted(os.listdir(b_source)): 
display.debug(u'Considering %s' % i) # Skip hidden files and stuff we explicitly ignore if IGNORED.search(i): continue # recursively deal with directory entries fullpath = to_text(os.path.join(b_source, i), errors='surrogate_or_strict') parsed_this_one = self.parse_source(fullpath, cache=cache) display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one)) if not parsed: parsed = parsed_this_one else: # left with strings or files, let plugins figure it out # set so new hosts can use for inventory_file/dir vasr self._inventory.current_source = source # get inventory plugins if needed, there should always be at least one generator if not self._inventory_plugins: self._setup_inventory_plugins() # try source with each plugin failures = [] for plugin in self._inventory_plugins: plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', ''))) display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path)) # initialize and figure out if plugin wants to attempt parsing this file try: plugin_wants = bool(plugin.verify_file(source)) except Exception: plugin_wants = False if plugin_wants: try: # in case plugin fails 1/2 way we dont want partial inventory plugin.parse(self._inventory, self._loader, source, cache=cache) parsed = True display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name)) break except AnsibleParserError as e: display.debug('%s was not parsable by %s' % (source, plugin_name)) failures.append({'src': source, 'plugin': plugin_name, 'exc': e}) except Exception as e: display.debug('%s failed to parse %s' % (plugin_name, source)) failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e)}) else: display.v('%s did not meet %s requirements, check plugin documentation if this is unexpected' % (source, plugin_name)) else: if not parsed and failures: # only if no plugin processed files should we show errors. 
for fail in failures: display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc']))) if hasattr(fail['exc'], 'tb'): display.vvv(to_text(fail['exc'].tb)) if C.INVENTORY_ANY_UNPARSED_IS_FAILED: raise AnsibleError(u'Completely failed to parse inventory source %s' % (source)) if not parsed: if source != '/etc/ansible/hosts' or os.path.exists(source): # only warn if NOT using the default and if using it, only if the file is present display.warning("Unable to parse %s as an inventory source" % source) # clear up, jic self._inventory.current_source = None return parsed def clear_caches(self): ''' clear all caches ''' self._hosts_patterns_cache = {} self._pattern_cache = {} # FIXME: flush inventory cache def refresh_inventory(self): ''' recalculate inventory ''' self.clear_caches() self._inventory = InventoryData() self.parse_sources(cache=False) def _match_list(self, items, pattern_str): # compile patterns try: if not pattern_str.startswith('~'): pattern = re.compile(fnmatch.translate(pattern_str)) else: pattern = re.compile(pattern_str[1:]) except Exception: raise AnsibleError('Invalid host list pattern: %s' % pattern_str) # apply patterns results = [] for item in items: if pattern.match(item): results.append(item) return results def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None): """ Takes a pattern or list of patterns and returns a list of matching inventory host names, taking into account any active restrictions or applied subsets """ hosts = [] # Check if pattern already computed if isinstance(pattern, list): pattern_hash = u":".join(pattern) else: pattern_hash = pattern if pattern_hash: if not ignore_limits and self._subset: pattern_hash += u":%s" % to_text(self._subset, errors='surrogate_or_strict') if not ignore_restrictions and self._restriction: pattern_hash += u":%s" % to_text(self._restriction, errors='surrogate_or_strict') if pattern_hash not in self._hosts_patterns_cache: patterns = split_host_pattern(pattern) hosts = self._evaluate_patterns(patterns) # mainly useful for hostvars[host] access if not ignore_limits and self._subset: # exclude hosts not in a subset, if defined subset = self._evaluate_patterns(self._subset) hosts = [h for h in hosts if h in subset] if not ignore_restrictions and self._restriction: # exclude hosts mentioned in any restriction (ex: failed hosts) hosts = [h for h in hosts if h.name in self._restriction] seen = set() self._hosts_patterns_cache[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)] # sort hosts list if needed (should only happen when called from strategy) if order in ['sorted', 'reverse_sorted']: from operator import attrgetter hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted')) elif order == 'reverse_inventory': hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True) else: hosts = self._hosts_patterns_cache[pattern_hash][:] if order == 'shuffle': from random import shuffle shuffle(hosts) elif order not in [None, 'inventory']: raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order) return hosts def _evaluate_patterns(self, patterns): """ Takes a list of patterns and returns a list of matching host names, taking into account any negative and intersection patterns. 
""" patterns = order_patterns(patterns) hosts = [] for p in patterns: # avoid resolving a pattern that is a plain host if p in self._inventory.hosts: hosts.append(self._inventory.get_host(p)) else: that = self._match_one_pattern(p) if p.startswith("!"): hosts = [h for h in hosts if h not in frozenset(that)] elif p.startswith("&"): hosts = [h for h in hosts if h in frozenset(that)] else: hosts.extend([h for h in that if h.name not in frozenset([y.name for y in hosts])]) return hosts def _match_one_pattern(self, pattern): """ Takes a single pattern and returns a list of matching host names. Ignores intersection (&) and exclusion (!) specifiers. The pattern may be: 1. A regex starting with ~, e.g. '~[abc]*' 2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*' 3. An ordinary word that matches itself only, e.g. 'foo' The pattern is matched using the following rules: 1. If it's 'all', it matches all hosts in all groups. 2. Otherwise, for each known group name: (a) if it matches the group name, the results include all hosts in the group or any of its children. (b) otherwise, if it matches any hosts in the group, the results include the matching hosts. This means that 'foo*' may match one or more groups (thus including all hosts therein) but also hosts in other groups. The built-in groups 'all' and 'ungrouped' are special. No pattern can match these group names (though 'all' behaves as though it matches, as described above). The word 'ungrouped' can match a host of that name, and patterns like 'ungr*' and 'al*' can match either hosts or groups other than all and ungrouped. If the pattern matches one or more group names according to these rules, it may have an optional range suffix to select a subset of the results. This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does not work (the [1] is interpreted as part of the regex), but 'foo*[1]' would work if 'foo*' matched the name of one or more groups. Duplicate matches are always eliminated from the results. """ if pattern.startswith("&") or pattern.startswith("!"): pattern = pattern[1:] if pattern not in self._pattern_cache: (expr, slice) = self._split_subscript(pattern) hosts = self._enumerate_matches(expr) try: hosts = self._apply_subscript(hosts, slice) except IndexError: raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern) self._pattern_cache[pattern] = hosts return self._pattern_cache[pattern] def _split_subscript(self, pattern): """ Takes a pattern, checks if it has a subscript, and returns the pattern without the subscript and a (start,end) tuple representing the given subscript (or None if there is no subscript). Validates that the subscript is in the right syntax, but doesn't make sure the actual indices make sense in context. """ # Do not parse regexes for enumeration info if pattern.startswith('~'): return (pattern, None) # We want a pattern followed by an integer or range subscript. # (We can't be more restrictive about the expression because the # fnmatch semantics permit [\[:\]] to occur.) pattern_with_subscript = re.compile( r'''^ (.+) # A pattern expression ending with... \[(?: # A [subscript] expression comprising: (-?[0-9]+)| # A single positive or negative number ([0-9]+)([:-]) # Or an x:y or x: range. 
([0-9]*) )\] $ ''', re.X ) subscript = None m = pattern_with_subscript.match(pattern) if m: (pattern, idx, start, sep, end) = m.groups() if idx: subscript = (int(idx), None) else: if not end: end = -1 subscript = (int(start), int(end)) if sep == '-': display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed") return (pattern, subscript) def _apply_subscript(self, hosts, subscript): """ Takes a list of hosts and a (start,end) tuple and returns the subset of hosts based on the subscript (which may be None to return all hosts). """ if not hosts or not subscript: return hosts (start, end) = subscript if end: if end == -1: end = len(hosts) - 1 return hosts[start:end + 1] else: return [hosts[start]] def _enumerate_matches(self, pattern): """ Returns a list of host names matching the given pattern according to the rules explained above in _match_one_pattern. """ results = [] # check if pattern matches group matching_groups = self._match_list(self._inventory.groups, pattern) if matching_groups: for groupname in matching_groups: results.extend(self._inventory.groups[groupname].get_hosts()) # check hosts if no groups matched or it is a regex/glob pattern if not matching_groups or pattern.startswith('~') or any(special in pattern for special in ('.', '?', '*', '[')): # pattern might match host matching_hosts = self._match_list(self._inventory.hosts, pattern) if matching_hosts: for hostname in matching_hosts: results.append(self._inventory.hosts[hostname]) if not results and pattern in C.LOCALHOST: # get_host autocreates implicit when needed implicit = self._inventory.get_host(pattern) if implicit: results.append(implicit) # Display warning if specified host pattern did not match any groups or hosts if not results and not matching_groups and pattern != 'all': display.warning("Could not match supplied host pattern, ignoring: %s" % pattern) return results def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ # FIXME: cache? result = [h for h in self.get_hosts(pattern)] # allow implicit localhost if pattern matches and no other results if len(result) == 0 and pattern in C.LOCALHOST: result = [pattern] return result def list_groups(self): # FIXME: cache? return sorted(self._inventory.groups.keys(), key=lambda x: x) def restrict_to_hosts(self, restriction): """ Restrict list operations to the hosts given in restriction. This is used to batch serial operations in main playbook code, don't use this for other reasons. """ if restriction is None: return elif not isinstance(restriction, list): restriction = [restriction] self._restriction = [h.name for h in restriction] def subset(self, subset_pattern): """ Limits inventory results to a subset of inventory that matches a given pattern, such as to select a given geographic of numeric slice amongst a previous 'hosts' selection that only select roles, or vice versa. Corresponds to --limit parameter to ansible-playbook """ if subset_pattern is None: self._subset = None else: subset_patterns = split_host_pattern(subset_pattern) results = [] # allow Unix style @filename data for x in subset_patterns: if x.startswith("@"): fd = open(x[1:]) results.extend(fd.read().split("\n")) fd.close() else: results.append(x) self._subset = results def remove_restriction(self): """ Do not restrict list operations """ self._restriction = None def clear_pattern_cache(self): self._pattern_cache = {}
shepdelacreme/ansible
lib/ansible/inventory/manager.py
Python
gpl-3.0
24,181
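# Illustrative sketch (an editor's aside, not part of the dataset entry above):
# the subscript grammar that _split_subscript() and _apply_subscript() implement
# in the manager.py tail above -- 'web[1]' picks a single host, 'web[1:3]' an
# *inclusive* range, 'web[1:]' an open-ended one. The host list is hypothetical;
# the real methods cache results and raise AnsibleError on out-of-range indices.
import re

_SUBSCRIPT = re.compile(r'^(.+)\[(?:(-?[0-9]+)|([0-9]+)([:-])([0-9]*))\]$')

def split_subscript(pattern):
    m = _SUBSCRIPT.match(pattern)
    if not m:
        return pattern, None
    base, idx, start, sep, end = m.groups()
    if idx:
        return base, (int(idx), None)                    # single index
    return base, (int(start), int(end) if end else -1)   # range; -1 = "to the end"

def apply_subscript(hosts, subscript):
    if not subscript:
        return hosts
    start, end = subscript
    if end is None:
        return [hosts[start]]
    end = len(hosts) - 1 if end == -1 else end
    return hosts[start:end + 1]                          # inclusive, unlike Python slicing

hosts = ['web0', 'web1', 'web2', 'web3']                 # hypothetical group members
assert apply_subscript(hosts, split_subscript('web[1]')[1]) == ['web1']
assert apply_subscript(hosts, split_subscript('web[1:2]')[1]) == ['web1', 'web2']
assert apply_subscript(hosts, split_subscript('web[1:]')[1]) == ['web1', 'web2', 'web3']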
import numpy as np


def kMeans(X, K, maxIters=10):
    # start from K centroids sampled from the data itself
    centroids = X[np.random.choice(np.arange(len(X)), K), :]
    for i in range(maxIters):
        # Cluster Assignment step: index of the nearest centroid (squared distance)
        C = np.array([np.argmin([np.dot(x_i - y_k, x_i - y_k) for y_k in centroids]) for x_i in X])
        # Move centroids step: mean of each cluster's members
        # (an empty cluster yields NaN here; a known limitation of this simple version)
        centroids = [X[C == k].mean(axis=0) for k in range(K)]
    return np.array(centroids), C


# Sample data: three Gaussian blobs. These covariance matrices are not
# symmetric, so NumPy may emit a RuntimeWarning but will still sample.
m1, cov1 = [9, 8], [[1.5, 2], [1, 2]]
m2, cov2 = [5, 13], [[2.5, -1.5], [-1.5, 1.5]]
m3, cov3 = [3, 7], [[0.25, 0.5], [-0.1, 0.5]]
data1 = np.random.multivariate_normal(m1, cov1, 250)
data2 = np.random.multivariate_normal(m2, cov2, 180)
data3 = np.random.multivariate_normal(m3, cov3, 100)
print(data1)
print(data2)
print(data3)

X = np.vstack((data1, data2, data3))
np.random.shuffle(X)

centroids, C = kMeans(X, K=3)
print(centroids)
print(C)  # cluster index assigned to each sample
hacktoberfest17/programming
machine_learning/Kmeans/kmeans.py
Python
gpl-3.0
900
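# Illustrative sketch (an editor's aside, assuming the kMeans() entry above):
# its assignment step computes squared distances with a nested list
# comprehension; the same step vectorizes with NumPy broadcasting, which
# scales far better for large X. X is an (n, d) array, centroids a (k, d) array.
import numpy as np

def assign_clusters(X, centroids):
    # (n, 1, d) - (1, k, d) broadcasts to (n, k, d); summing squares over the
    # last axis gives an (n, k) distance matrix, then argmin picks per sample.
    d2 = ((X[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
    return d2.argmin(axis=1)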
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import fnmatch import os import re import itertools from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError from ansible.inventory.data import InventoryData from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes, to_text from ansible.parsing.utils.addresses import parse_address from ansible.plugins.loader import inventory_loader from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"] IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS] IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS] IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS)) def order_patterns(patterns): ''' takes a list of patterns and reorders them by modifier to apply them consistently ''' # FIXME: this goes away if we apply patterns incrementally or by groups pattern_regular = [] pattern_intersection = [] pattern_exclude = [] for p in patterns: if p.startswith("!"): pattern_exclude.append(p) elif p.startswith("&"): pattern_intersection.append(p) elif p: pattern_regular.append(p) # if no regular pattern was given, hence only exclude and/or intersection # make that magically work if pattern_regular == []: pattern_regular = ['all'] # when applying the host selectors, run those without the "&" or "!" # first, then the &s, then the !s. return pattern_regular + pattern_intersection + pattern_exclude def split_host_pattern(pattern): """ Takes a string containing host patterns separated by commas (or a list thereof) and returns a list of single patterns (which may not contain commas). Whitespace is ignored. Also accepts ':' as a separator for backwards compatibility, but it is not recommended due to the conflict with IPv6 addresses and host ranges. Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd'] """ if isinstance(pattern, list): return list(itertools.chain(*map(split_host_pattern, pattern))) elif not isinstance(pattern, string_types): pattern = to_text(pattern, errors='surrogate_or_strict') # If it's got commas in it, we'll treat it as a straightforward # comma-separated list of patterns. if u',' in pattern: patterns = pattern.split(u',') # If it doesn't, it could still be a single pattern. This accounts for # non-separator uses of colons: IPv6 addresses and [x:y] host ranges. 
else: try: (base, port) = parse_address(pattern, allow_ranges=True) patterns = [pattern] except Exception: # The only other case we accept is a ':'-separated list of patterns. # This mishandles IPv6 addresses, and is retained only for backwards # compatibility. patterns = re.findall( to_text(r'''(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' | # ...or... \[[^\]]*\] # a single complete bracketed expression) )+ # occurring once or more '''), pattern, re.X ) return [p.strip() for p in patterns] class InventoryManager(object): ''' Creates and manages inventory ''' def __init__(self, loader, sources=None): # base objects self._loader = loader self._inventory = InventoryData() # a list of host(names) to contain current inquiries to self._restriction = None self._subset = None # caches self._hosts_patterns_cache = {} # resolved full patterns self._pattern_cache = {} # resolved individual patterns self._inventory_plugins = [] # for generating inventory # the inventory dirs, files, script paths or lists of hosts if sources is None: self._sources = [] elif isinstance(sources, string_types): self._sources = [sources] else: self._sources = sources # get to work! self.parse_sources(cache=True) @property def localhost(self): return self._inventory.localhost @property def groups(self): return self._inventory.groups @property def hosts(self): return self._inventory.hosts def get_vars(self, *args, **kwargs): return self._inventory.get_vars(args, kwargs) def add_host(self, host, group=None, port=None): return self._inventory.add_host(host, group, port) def add_group(self, group): return self._inventory.add_group(group) def get_groups_dict(self): return self._inventory.get_groups_dict() def reconcile_inventory(self): self.clear_caches() return self._inventory.reconcile_inventory() def get_host(self, hostname): return self._inventory.get_host(hostname) def _setup_inventory_plugins(self): ''' sets up loaded inventory plugins for usage ''' display.vvvv('setting up inventory plugins') for name in C.INVENTORY_ENABLED: plugin = inventory_loader.get(name) if plugin: plugin.set_options() self._inventory_plugins.append(plugin) else: display.warning('Failed to load inventory plugin, skipping %s' % name) if not self._inventory_plugins: raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.") def parse_sources(self, cache=False): ''' iterate over inventory sources and parse each one to populate it''' self._setup_inventory_plugins() parsed = False # allow for multiple inventory parsing for source in self._sources: if source: if ',' not in source: source = unfrackpath(source, follow=False) parse = self.parse_source(source, cache=cache) if parse and not parsed: parsed = True if parsed: # do post processing self._inventory.reconcile_inventory() else: if C.INVENTORY_UNPARSED_IS_FAILED: raise AnsibleError("No inventory was parsed, please check your configuration and options.") else: display.warning("No inventory was parsed, only implicit localhost is available") self._inventory_plugins = [] def parse_source(self, source, cache=False): ''' Generate or update inventory for the source provided ''' parsed = False display.debug(u'Examining possible inventory source: %s' % source) b_source = to_bytes(source) # process directories as a collection of inventories if os.path.isdir(b_source): display.debug(u'Searching for inventory files in directory: %s' % source) for i in sorted(os.listdir(b_source)): 
display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                b_fullpath = os.path.join(b_source, i)
                parsed_this_one = self.parse_source(b_fullpath, cache=cache)
                display.debug(u'parsed %s as %s' % (to_text(b_fullpath), parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set so new hosts can use for inventory_file/dir vars
            self._inventory.current_source = source

            # get inventory plugins if needed, there should always be at least one generator
            if not self._inventory_plugins:
                self._setup_inventory_plugins()

            # try source with each plugin
            failures = []
            for plugin in self._inventory_plugins:
                plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))

                # initialize and figure out if plugin wants to attempt parsing this file
                try:
                    plugin_wants = bool(plugin.verify_file(source))
                except Exception:
                    plugin_wants = False

                if plugin_wants:
                    try:
                        # in case plugin fails 1/2 way we don't want partial inventory
                        plugin.parse(self._inventory, self._loader, source, cache=cache)
                        parsed = True
                        display.vvv('Parsed %s inventory source with %s plugin' % (to_text(source), plugin_name))
                        break
                    except AnsibleParserError as e:
                        display.debug('%s was not parsable by %s' % (to_text(source), plugin_name))
                        failures.append({'src': source, 'plugin': plugin_name, 'exc': e})
                    except Exception as e:
                        display.debug('%s failed to parse %s' % (plugin_name, to_text(source)))
                        failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e)})
                else:
                    display.debug('%s did not meet %s requirements' % (to_text(source), plugin_name))
            else:
                if not parsed and failures:
                    # only if no plugin processed files should we show errors.
for fail in failures: display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc']))) if hasattr(fail['exc'], 'tb'): display.vvv(to_text(fail['exc'].tb)) if C.INVENTORY_ANY_UNPARSED_IS_FAILED: raise AnsibleError(u'Completely failed to parse inventory source %s' % (to_text(source))) if not parsed: display.warning("Unable to parse %s as an inventory source" % to_text(source)) # clear up, jic self._inventory.current_source = None return parsed def clear_caches(self): ''' clear all caches ''' self._hosts_patterns_cache = {} self._pattern_cache = {} # FIXME: flush inventory cache def refresh_inventory(self): ''' recalculate inventory ''' self.clear_caches() self._inventory = InventoryData() self.parse_sources(cache=False) def _match_list(self, items, pattern_str): # compile patterns try: if not pattern_str.startswith('~'): pattern = re.compile(fnmatch.translate(pattern_str)) else: pattern = re.compile(pattern_str[1:]) except Exception: raise AnsibleError('Invalid host list pattern: %s' % pattern_str) # apply patterns results = [] for item in items: if pattern.match(item): results.append(item) return results def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None): """ Takes a pattern or list of patterns and returns a list of matching inventory host names, taking into account any active restrictions or applied subsets """ hosts = [] # Check if pattern already computed if isinstance(pattern, list): pattern_hash = u":".join(pattern) else: pattern_hash = pattern if pattern_hash: if not ignore_limits and self._subset: pattern_hash += u":%s" % to_text(self._subset, errors='surrogate_or_strict') if not ignore_restrictions and self._restriction: pattern_hash += u":%s" % to_text(self._restriction, errors='surrogate_or_strict') if pattern_hash not in self._hosts_patterns_cache: patterns = split_host_pattern(pattern) hosts = self._evaluate_patterns(patterns) # mainly useful for hostvars[host] access if not ignore_limits and self._subset: # exclude hosts not in a subset, if defined subset = self._evaluate_patterns(self._subset) hosts = [h for h in hosts if h in subset] if not ignore_restrictions and self._restriction: # exclude hosts mentioned in any restriction (ex: failed hosts) hosts = [h for h in hosts if h.name in self._restriction] seen = set() self._hosts_patterns_cache[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)] # sort hosts list if needed (should only happen when called from strategy) if order in ['sorted', 'reverse_sorted']: from operator import attrgetter hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted')) elif order == 'reverse_inventory': hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True) else: hosts = self._hosts_patterns_cache[pattern_hash][:] if order == 'shuffle': from random import shuffle shuffle(hosts) elif order not in [None, 'inventory']: AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order) return hosts def _evaluate_patterns(self, patterns): """ Takes a list of patterns and returns a list of matching host names, taking into account any negative and intersection patterns. 
""" patterns = order_patterns(patterns) hosts = [] for p in patterns: # avoid resolving a pattern that is a plain host if p in self._inventory.hosts: hosts.append(self._inventory.get_host(p)) else: that = self._match_one_pattern(p) if p.startswith("!"): hosts = [h for h in hosts if h not in frozenset(that)] elif p.startswith("&"): hosts = [h for h in hosts if h in frozenset(that)] else: hosts.extend([h for h in that if h.name not in frozenset([y.name for y in hosts])]) return hosts def _match_one_pattern(self, pattern): """ Takes a single pattern and returns a list of matching host names. Ignores intersection (&) and exclusion (!) specifiers. The pattern may be: 1. A regex starting with ~, e.g. '~[abc]*' 2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*' 3. An ordinary word that matches itself only, e.g. 'foo' The pattern is matched using the following rules: 1. If it's 'all', it matches all hosts in all groups. 2. Otherwise, for each known group name: (a) if it matches the group name, the results include all hosts in the group or any of its children. (b) otherwise, if it matches any hosts in the group, the results include the matching hosts. This means that 'foo*' may match one or more groups (thus including all hosts therein) but also hosts in other groups. The built-in groups 'all' and 'ungrouped' are special. No pattern can match these group names (though 'all' behaves as though it matches, as described above). The word 'ungrouped' can match a host of that name, and patterns like 'ungr*' and 'al*' can match either hosts or groups other than all and ungrouped. If the pattern matches one or more group names according to these rules, it may have an optional range suffix to select a subset of the results. This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does not work (the [1] is interpreted as part of the regex), but 'foo*[1]' would work if 'foo*' matched the name of one or more groups. Duplicate matches are always eliminated from the results. """ if pattern.startswith("&") or pattern.startswith("!"): pattern = pattern[1:] if pattern not in self._pattern_cache: (expr, slice) = self._split_subscript(pattern) hosts = self._enumerate_matches(expr) try: hosts = self._apply_subscript(hosts, slice) except IndexError: raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern) self._pattern_cache[pattern] = hosts return self._pattern_cache[pattern] def _split_subscript(self, pattern): """ Takes a pattern, checks if it has a subscript, and returns the pattern without the subscript and a (start,end) tuple representing the given subscript (or None if there is no subscript). Validates that the subscript is in the right syntax, but doesn't make sure the actual indices make sense in context. """ # Do not parse regexes for enumeration info if pattern.startswith('~'): return (pattern, None) # We want a pattern followed by an integer or range subscript. # (We can't be more restrictive about the expression because the # fnmatch semantics permit [\[:\]] to occur.) pattern_with_subscript = re.compile( r'''^ (.+) # A pattern expression ending with... \[(?: # A [subscript] expression comprising: (-?[0-9]+)| # A single positive or negative number ([0-9]+)([:-]) # Or an x:y or x: range. 
([0-9]*) )\] $ ''', re.X ) subscript = None m = pattern_with_subscript.match(pattern) if m: (pattern, idx, start, sep, end) = m.groups() if idx: subscript = (int(idx), None) else: if not end: end = -1 subscript = (int(start), int(end)) if sep == '-': display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed") return (pattern, subscript) def _apply_subscript(self, hosts, subscript): """ Takes a list of hosts and a (start,end) tuple and returns the subset of hosts based on the subscript (which may be None to return all hosts). """ if not hosts or not subscript: return hosts (start, end) = subscript if end: if end == -1: end = len(hosts) - 1 return hosts[start:end + 1] else: return [hosts[start]] def _enumerate_matches(self, pattern): """ Returns a list of host names matching the given pattern according to the rules explained above in _match_one_pattern. """ results = [] # check if pattern matches group matching_groups = self._match_list(self._inventory.groups, pattern) if matching_groups: for groupname in matching_groups: results.extend(self._inventory.groups[groupname].get_hosts()) # check hosts if no groups matched or it is a regex/glob pattern if not matching_groups or pattern.startswith('~') or any(special in pattern for special in ('.', '?', '*', '[')): # pattern might match host matching_hosts = self._match_list(self._inventory.hosts, pattern) if matching_hosts: for hostname in matching_hosts: results.append(self._inventory.hosts[hostname]) if not results and pattern in C.LOCALHOST: # get_host autocreates implicit when needed implicit = self._inventory.get_host(pattern) if implicit: results.append(implicit) # Display warning if specified host pattern did not match any groups or hosts if not results and not matching_groups and pattern != 'all': display.warning("Could not match supplied host pattern, ignoring: %s" % pattern) return results def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ # FIXME: cache? result = [h for h in self.get_hosts(pattern)] # allow implicit localhost if pattern matches and no other results if len(result) == 0 and pattern in C.LOCALHOST: result = [pattern] return result def list_groups(self): # FIXME: cache? return sorted(self._inventory.groups.keys(), key=lambda x: x) def restrict_to_hosts(self, restriction): """ Restrict list operations to the hosts given in restriction. This is used to batch serial operations in main playbook code, don't use this for other reasons. """ if restriction is None: return elif not isinstance(restriction, list): restriction = [restriction] self._restriction = [h.name for h in restriction] def subset(self, subset_pattern): """ Limits inventory results to a subset of inventory that matches a given pattern, such as to select a given geographic of numeric slice amongst a previous 'hosts' selection that only select roles, or vice versa. Corresponds to --limit parameter to ansible-playbook """ if subset_pattern is None: self._subset = None else: subset_patterns = split_host_pattern(subset_pattern) results = [] # allow Unix style @filename data for x in subset_patterns: if x.startswith("@"): fd = open(x[1:]) results.extend(fd.read().split("\n")) fd.close() else: results.append(x) self._subset = results def remove_restriction(self): """ Do not restrict list operations """ self._restriction = None def clear_pattern_cache(self): self._pattern_cache = {}
konstruktoid/ansible-upstream
lib/ansible/inventory/manager.py
Python
gpl-3.0
23,980
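# Illustrative sketch (an editor's aside, not part of the dataset entry above):
# how order_patterns() and _evaluate_patterns() in the manager.py above compose.
# Plain patterns union, '&' patterns intersect, '!' patterns subtract, and a
# list with only '&'/'!' entries implies 'all' first. The dict inventory and
# host names are assumptions; the real code resolves groups, globs, and regexes
# to Host objects, caches results, and warns on unmatched patterns.
def evaluate(patterns, inventory):
    regular = [p for p in patterns if p and p[0] not in '!&'] or ['all']
    ordered = (regular
               + [p for p in patterns if p.startswith('&')]
               + [p for p in patterns if p.startswith('!')])
    hosts = []
    for p in ordered:
        name = p.lstrip('!&')
        # a group name resolves to its members; anything else is a plain host
        that = inventory.get(name, [name])
        if p.startswith('!'):
            hosts = [h for h in hosts if h not in set(that)]
        elif p.startswith('&'):
            hosts = [h for h in hosts if h in set(that)]
        else:
            hosts.extend(h for h in that if h not in hosts)
    return hosts

groups = {'all': ['db1', 'web1', 'web2'],
          'webservers': ['web1', 'web2'],
          'staging': ['web2', 'db1']}  # hypothetical inventory
assert evaluate(['webservers', '&staging'], groups) == ['web2']
assert evaluate(['!db1'], groups) == ['web1', 'web2']  # implicit 'all' first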
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_provision short_description: Manage BIG-IP module provisioning description: - Manage BIG-IP module provisioning. This module will only provision at the standard levels of Dedicated, Nominal, and Minimum. version_added: 2.4 options: module: description: - The module to provision in BIG-IP. required: true choices: - am - afm - apm - asm - avr - cgnat - fps - gtm - ilx - lc - ltm - pem - sam - swg - vcmp aliases: - name level: description: - Sets the provisioning level for the requested modules. Changing the level for one module may require modifying the level of another module. For example, changing one module to C(dedicated) requires setting all others to C(none). Setting the level of a module to C(none) means that the module is not activated. - This parameter is not relevant to C(cgnat) and will not be applied to the C(cgnat) module. default: nominal choices: - dedicated - nominal - minimum state: description: - The state of the provisioned module on the system. When C(present), guarantees that the specified module is provisioned at the requested level provided that there are sufficient resources on the device (such as physical RAM) to support the provisioned module. When C(absent), de-provision the module. default: present choices: - present - absent extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Provision PEM at "nominal" level bigip_provision: module: pem level: nominal provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Provision a dedicated SWG. This will unprovision every other module bigip_provision: module: swg level: dedicated provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost ''' RETURN = r''' level: description: The new provisioning level of the module. 
returned: changed type: string sample: minimum ''' import time from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.icontrol import TransactionContextManager except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.icontrol import TransactionContextManager class Parameters(AnsibleF5Parameters): api_attributes = ['level'] returnables = ['level'] updatables = ['level', 'cgnat'] @property def level(self): if self._values['level'] is None: return None if self.state == 'absent': return 'none' return str(self._values['level']) class ApiParameters(Parameters): pass class ModuleParameters(Parameters): pass class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result except Exception: return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def cgnat(self): if self.want.module == 'cgnat': if self.want.state == 'absent' and self.have.enabled is True: return True if self.want.state == 'present' and self.have.disabled is True: return True class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.have = None self.want = ModuleParameters(params=self.module.params) self.changes = UsableChanges() def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def present(self): if self.exists(): return False return self.update() def exists(self): if self.want.module == 'cgnat': uri = 
"https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'disabled' in response and response['disabled'] is True: return False elif 'enabled' in response and response['enabled'] is True: return True try: for x in range(0, 5): uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if str(response['level']) != 'none' and self.want.level == 'none': return True if str(response['level']) == 'none' and self.want.level == 'none': return False if str(response['level']) == self.want.level: return True return False except Exception as ex: if 'not registered' in str(ex): return False time.sleep(1) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True result = self.update_on_device() if self.want.module == 'cgnat': return result self._wait_for_module_provisioning() if self.want.module == 'vcmp': self._wait_for_reboot() self._wait_for_module_provisioning() if self.want.module == 'asm': self._wait_for_asm_ready() if self.want.module == 'afm': self._wait_for_afm_ready() return True def should_reboot(self): for x in range(0, 24): try: uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format( self.client.provider['server'], self.client.provider['server_port'], 'provision.action' ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if response['value'] == 'reboot': return True elif response['value'] == 'none': time.sleep(5) except Exception: time.sleep(5) return False def reboot_device(self): nops = 0 last_reboot = self._get_last_reboot() try: params = dict( command="run", utilCmdArgs='-c "/sbin/reboot"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) except Exception: pass # Sleep a little to let rebooting take effect time.sleep(20) while nops < 3: try: self.client.reconnect() next_reboot = self._get_last_reboot() if next_reboot is None: nops = 0 if next_reboot == last_reboot: nops = 0 else: nops += 1 except Exception as ex: # This can be caused by restjavad restarting. 
pass time.sleep(10) return None def should_update(self): result = self._update_changed_options() if result: return True return False def update_on_device(self): if self.want.module == 'cgnat': if self.changes.cgnat: return self.provision_cgnat_on_device() return False elif self.want.level == 'dedicated': self.provision_dedicated_on_device() else: self.provision_non_dedicated_on_device() def provision_cgnat_on_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) params = dict(enabled=True) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def provision_dedicated_on_device(self): params = self.want.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/provision/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) resources = [x['name'] for x in response['items'] if x['name'] != self.want.module] with TransactionContextManager(self.client) as transact: for resource in resources: target = uri + resource resp = transact.api.patch(target, json=dict(level='none')) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) target = uri + self.want.module resp = transact.api.patch(target, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def provision_non_dedicated_on_device(self): params = self.want.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): if self.want.module == 'cgnat': uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) else: uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise 
F5ModuleError(resp.content) return ApiParameters(params=response) def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.module.check_mode: return True if self.want.module == 'cgnat': return self.deprovision_cgnat_on_device() self.remove_from_device() self._wait_for_module_provisioning() # For vCMP, because it has to reboot, we also wait for mcpd to become available # before "moving on", or else the REST API would not be available and subsequent # Tasks would fail. if self.want.module == 'vcmp': self._wait_for_reboot() self._wait_for_module_provisioning() if self.should_reboot(): self.save_on_device() self.reboot_device() self._wait_for_module_provisioning() if self.exists(): raise F5ModuleError("Failed to de-provision the module") return True def save_on_device(self): command = 'tmsh save sys config' params = dict( command="run", utilCmdArgs='-c "{0}"'.format(command) ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.patch(uri, json=dict(level='none')) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def deprovision_cgnat_on_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) params = dict(disabled=True) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def _wait_for_module_provisioning(self): # To prevent things from running forever, the hack is to check # for mprov's status twice. If mprov is finished, then in most # cases (not ASM) the provisioning is probably ready. nops = 0 # Sleep a little to let provisioning settle and begin properly time.sleep(5) while nops < 3: try: if not self._is_mprov_running_on_device(): nops += 1 else: nops = 0 except Exception: # This can be caused by restjavad restarting. try: self.client.reconnect() except Exception: pass time.sleep(5) def _is_mprov_running_on_device(self): # /usr/libexec/qemu-kvm is added here to prevent vcmp provisioning # from never allowing the mprov provisioning to succeed. # # It turns out that the 'mprov' string is found when enabling vcmp. The # qemu-kvm command that is run includes it. # # For example, # /usr/libexec/qemu-kvm -rt-usecs 880 ... -mem-path /dev/mprov/vcmp -f5-tracing ... 
# try: command = "ps aux | grep \'[m]prov\' | grep -v /usr/libexec/qemu-kvm" params = dict( command="run", utilCmdArgs='-c "{0}"'.format(command) ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return True except Exception: pass return False def _wait_for_asm_ready(self): """Waits specifically for ASM On older versions, ASM can take longer to actually start up than all the previous checks take. This check here is specifically waiting for the Policies API to stop raising errors :return: """ nops = 0 restarted_asm = False while nops < 3: try: uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if len(response['items']) >= 0: nops += 1 else: nops = 0 except Exception as ex: if not restarted_asm: self._restart_asm() restarted_asm = True time.sleep(5) def _wait_for_afm_ready(self): """Waits specifically for AFM AFM can take longer to actually start up than all the previous checks take. This check here is specifically waiting for the Security API to stop raising errors. :return: """ nops = 0 while nops < 3: try: uri = "https://{0}:{1}/mgmt/tm/security/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if len(response['items']) >= 0: nops += 1 else: nops = 0 except Exception as ex: pass time.sleep(5) def _restart_asm(self): try: params = dict( command="run", utilCmdArgs='-c "bigstart restart asm"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) time.sleep(60) return True except Exception: pass return None def _get_last_reboot(self): try: params = dict( command="run", utilCmdArgs='-c "/usr/bin/last reboot | head -1"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) except Exception: pass return None def _wait_for_reboot(self): nops = 0 last_reboot = 
self._get_last_reboot() # Sleep a little to let provisioning settle and begin properly time.sleep(5) while nops < 6: try: self.client.reconnect() next_reboot = self._get_last_reboot() if next_reboot is None: nops = 0 if next_reboot == last_reboot: nops = 0 else: nops += 1 except Exception as ex: # This can be caused by restjavad restarting. pass time.sleep(10) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( module=dict( required=True, choices=[ 'afm', 'am', 'sam', 'asm', 'avr', 'fps', 'gtm', 'lc', 'ltm', 'pem', 'swg', 'ilx', 'apm', 'vcmp', 'cgnat' ], aliases=['name'] ), level=dict( default='nominal', choices=['nominal', 'dedicated', 'minimum'] ), state=dict( default='present', choices=['present', 'absent'] ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['parameters', 'parameters_src'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive ) client = F5RestClient(**module.params) try: mm = ModuleManager(module=module, client=client) results = mm.exec_module() exit_json(module, results, client) except F5ModuleError as ex: fail_json(module, ex, client) if __name__ == '__main__': main()
veger/ansible
lib/ansible/modules/network/f5/bigip_provision.py
Python
gpl-3.0
29,288
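# Illustrative sketch (an editor's aside, not module code): bigip_provision
# above repeats two idioms many times. (1) Every REST call is followed by the
# same parse-and-raise block; (2) the _wait_for_* methods all poll until N
# consecutive good probes, resetting the counter on any failure so a briefly
# quiet service isn't mistaken for a finished one. Both are distilled below;
# `resp` is assumed to be a requests-style response object, F5ModuleError
# stands in for the module's own import, and the timeout parameter is an
# added safety bound (the module's loops have none).
import time

class F5ModuleError(Exception):
    pass

def checked_json(resp, bad_codes=(400, 403)):
    """Parse a REST response, raising F5ModuleError on the error shapes the
    module checks for (a 'code' field, with an optional 'message')."""
    try:
        response = resp.json()
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if 'code' in response and response['code'] in bad_codes:
        raise F5ModuleError(response.get('message', resp.content))
    return response

def wait_until_stable(probe, consecutive=3, interval=5, timeout=600):
    """Return True once probe() succeeds `consecutive` times in a row."""
    good, deadline = 0, time.time() + timeout
    while time.time() < deadline:
        try:
            good = good + 1 if probe() else 0
        except Exception:
            good = 0  # e.g. restjavad restarting mid-provision
        if good >= consecutive:
            return True
        time.sleep(interval)
    return False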
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from frappe.model.document import Document class LostReasonDetail(Document): pass
mhbu50/erpnext
erpnext/crm/doctype/lost_reason_detail/lost_reason_detail.py
Python
gpl-3.0
206
from __future__ import absolute_import, division, unicode_literals from collections.abc import Mapping class Trie(Mapping): """Abstract base class for tries""" def keys(self, prefix=None): # pylint:disable=arguments-differ keys = super(Trie, self).keys() if prefix is None: return set(keys) return {x for x in keys if x.startswith(prefix)} def has_keys_with_prefix(self, prefix): for key in self.keys(): if key.startswith(prefix): return True return False def longest_prefix(self, prefix): if prefix in self: return prefix for i in range(1, len(prefix) + 1): if prefix[:-i] in self: return prefix[:-i] raise KeyError(prefix) def longest_prefix_item(self, prefix): lprefix = self.longest_prefix(prefix) return (lprefix, self[lprefix])
unreal666/outwiker
plugins/webpage/webpage/libs/html5lib/_trie/_base.py
Python
gpl-3.0
934
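# Illustrative sketch (an editor's aside, not part of the dataset entry above):
# the Trie base class only needs the Mapping protocol from a subclass, so a
# plain dict-backed subclass is enough to exercise keys(prefix) and
# longest_prefix (which scans progressively shorter prefixes until one is a
# stored key). The data values here are hypothetical.
class DictTrie(Trie):
    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

t = DictTrie({'foo': 1, 'foobar': 2, 'baz': 3})
assert t.keys('foo') == {'foo', 'foobar'}
assert t.has_keys_with_prefix('ba')
assert t.longest_prefix('foobarbaz') == 'foobar'
assert t.longest_prefix_item('food') == ('foo', 1)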
# -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- # # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # from __future__ import unicode_literals import sys, os, uno, unohelper import re, random, traceback, itertools import threading, time as __time__ try: unicode next = lambda l: l.next() # python 2 except: unicode, long = str, int # support python 3 urebootstrap = os.environ["URE_BOOTSTRAP"] if "vnd.sun.star.pathname" in urebootstrap: __lngpath__ = re.sub(r"^vnd.sun.star.pathname:(.*)program(/|\\)fundamental([.]ini|rc)$", "\\1", urebootstrap) else: __lngpath__ = unohelper.fileUrlToSystemPath(re.sub("program/(fundamental.ini|fundamentalrc)$", "", urebootstrap)) __lngpath__ = __lngpath__ + "share/Scripts/python/LibreLogo/".replace("/", os.sep) __translang__ = "am|ca|cs|de|dk|el|en|eo|es|et|fr|hu|it|ja|nl|no|pl|pt|ru|se|sl" # FIXME supported languages for language guessing, expand this list, according to the localizations __lng__ = {} __docs__ = {} __prevcode__ = None __prevlang__ = None __prevcompiledcode__ = None __thread__ = None __lock__ = threading.Lock() __halt__ = False __compiled__ = "" __group__ = 0 __groupstack__ = [] __grouplefthang__ = 0 __comp__ = {} __strings__ = [] __colors__ = {} __COLORS__ = ['BLACK', 0x000000], ['SILVER', 0xc0c0c0], ['GRAY', 0x808080], \ ['WHITE', 0xffffff], ['MAROON', 0x800000], ['RED', 0xff0000], \ ['PURPLE', 0x800080], ['FUCHSIA', 0xff00ff], ['GREEN', 0x008000], \ ['LIME', 0x00ff00], ['OLIVE', 0x808000], ['YELLOW', 0xffff00], \ ['NAVY', 0x000080], ['BLUE', 0x0000ff], ['TEAL', 0x008080], \ ['AQUA', 0x00ffff], ['PINK', 0xffc0cb], ['TOMATO', 0xff6347], \ ['ORANGE', 0xffa500], ['GOLD', 0xffd700], ['VIOLET', 0x9400d3], \ ['SKYBLUE', 0x87ceeb], ['CHOCOLATE', 0xd2691e], ['BROWN', 0xa52a2a], \ ['INVISIBLE', 0xffffffff] __NORMCOLORS__ = [[[255, 255, 0], 0, -11, 1, -11], [[255, 128, 0], 1, 116, 1, -33], [[255, 0, 0], 1, 95, 2, 42], [[255, 0, 255], 2, -213, 0, -106], [[0, 0, 255], 0, 148, 1, 127], [[0, 255, 255], 1, -128, 2, -63], [[0, 255, 0], 2, 192, 0, 244]] __STRCONST__ = [i[0] for i in __COLORS__] + ['NONE', 'BEVEL', 'MITER', 'ROUNDED', 'SOLID', 'DASH', 'DOTTED', 'BOLD', 'ITALIC', 'UPRIGHT', 'NORMAL', "HOUR", "PT", "INCH", "MM", "CM"] __SLEEP_SLICE_IN_MILLISECONDS__ = 500 __PT_TO_TWIP__ = 20 __MM_TO_PT__ = 1/(25.4/72) __MM10_TO_TWIP__ = 1/(2540.0/72/20) # 0.01 mm to twentieth point __FILLCOLOR__ = 0x8000cc00 __LINEWIDTH__ = 0.5 * __PT_TO_TWIP__ __ENCODED_STRING__ = "_s_%s___" __ENCODED_COMMENT__ = "_c_%s___" __DECODE_STRING_REGEX__ = "_s_([0-9]+)___" __DECODE_COMMENT_REGEX__ = "_c_([0-9]+)___" __LINEBREAK__ = "#_@L_i_N_e@_#" __TURTLE__ = "turtle" __ACTUAL__ = "actual" __BASEFONTFAMILY__ = "Linux Biolinum G" __LineStyle_DOTTED__ = 2 class __Doc__: def __init__(self, doc): self.doc = doc try: self.drawpage = doc.DrawPage # Writer except: self.drawpage = doc.DrawPages.getByIndex(0) # Draw, Impress self.shapecache = {} self.shapecount = itertools.count() self.time = 0 self.zoomvalue = 0 self.initialize() def initialize(self): self.pen = 1 self.pencolor = 0 self.pensize = __LINEWIDTH__ self.linestyle = __LineStyle_SOLID__ self.linejoint = __ROUNDED__ self.linecap = __Cap_NONE__ self.oldlc = 0 self.oldlw = 0 self.oldls = __LineStyle_SOLID__ self.oldlj = __ROUNDED__ self.continuous = True self.areacolor = __FILLCOLOR__ self.t10y = int((__FILLCOLOR__ >> 24) / 
(255.0/100)) self.hatch = None self.textcolor = 0 self.fontfamily = __BASEFONTFAMILY__ self.fontheight = 12 self.fontweight = 100 self.fontstyle = 0 from math import pi, sin, cos, asin, sqrt, log10 from com.sun.star.awt import Point as __Point__ from com.sun.star.awt import Gradient as __Gradient__ from com.sun.star.awt.GradientStyle import LINEAR as __GradientStyle_LINEAR__ from com.sun.star.drawing import LineDash as __LineDash__ from com.sun.star.drawing import Hatch as __Hatch__ from com.sun.star.drawing import PolyPolygonBezierCoords as __Bezier__ from com.sun.star.text.TextContentAnchorType import AT_PAGE as __AT_PAGE__ from com.sun.star.text.WrapTextMode import THROUGHT as __THROUGHT__ from com.sun.star.drawing.LineCap import BUTT as __Cap_NONE__ from com.sun.star.drawing.LineCap import ROUND as __Cap_ROUND__ from com.sun.star.drawing.LineCap import SQUARE as __Cap_SQUARE__ from com.sun.star.drawing.LineJoint import NONE as __Joint_NONE__ from com.sun.star.drawing.LineJoint import BEVEL as __BEVEL__ from com.sun.star.drawing.LineJoint import MITER as __MITER__ from com.sun.star.drawing.LineJoint import ROUND as __ROUNDED__ from com.sun.star.drawing.FillStyle import NONE as __FillStyle_NONE__ from com.sun.star.drawing.FillStyle import GRADIENT as __FillStyle_GRADIENT__ from com.sun.star.drawing.LineStyle import NONE as __LineStyle_NONE__ from com.sun.star.drawing.LineStyle import SOLID as __LineStyle_SOLID__ from com.sun.star.drawing.LineStyle import DASH as __LineStyle_DASHED__ from com.sun.star.drawing.DashStyle import RECT as __DashStyle_RECT__ from com.sun.star.drawing.DashStyle import ROUND as __DashStyle_ROUND__ from com.sun.star.drawing.DashStyle import ROUNDRELATIVE as __DashStyle_ROUNDRELATIVE__ from com.sun.star.drawing.CircleKind import FULL as __FULL__ from com.sun.star.drawing.CircleKind import SECTION as __SECTION__ from com.sun.star.drawing.CircleKind import CUT as __CUT__ from com.sun.star.drawing.CircleKind import ARC as __ARC__ from com.sun.star.awt.FontSlant import NONE as __Slant_NONE__ from com.sun.star.awt.FontSlant import ITALIC as __Slant_ITALIC__ from com.sun.star.awt import Size as __Size__ from com.sun.star.awt import WindowDescriptor as __WinDesc__ from com.sun.star.awt.WindowClass import MODALTOP as __MODALTOP__ from com.sun.star.awt.VclWindowPeerAttribute import OK as __OK__ from com.sun.star.awt.VclWindowPeerAttribute import OK_CANCEL as __OK_CANCEL__ from com.sun.star.awt.VclWindowPeerAttribute import YES_NO_CANCEL as __YES_NO_CANCEL__ # OK_CANCEL, YES_NO, RETRY_CANCEL, DEF_OK, DEF_CANCEL, DEF_RETRY, DEF_YES, DEF_NO from com.sun.star.awt.PushButtonType import OK as __Button_OK__ from com.sun.star.awt.PushButtonType import CANCEL as __Button_CANCEL__ from com.sun.star.util.MeasureUnit import APPFONT as __APPFONT__ from com.sun.star.beans import PropertyValue as __property__ from com.sun.star.lang import Locale def __getprop__(name, value): p, p.Name, p.Value = __property__(), name, value return p __uilocale__ = uno.getComponentContext().getValueByName("/singletons/com.sun.star.configuration.theDefaultProvider").\ createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess",\ (__getprop__("nodepath", "/org.openoffice.Setup/L10N"),)).getByName("ooLocale") + '-' # handle missing Country of locale 'eo' def __l12n__(lng): try: return __lng__[lng] except: try: __lng__[lng] = dict([[i.decode("unicode-escape").split("=")[0].strip(), i.decode("unicode-escape").split("=")[1].strip().strip("|")] for i in open(__lngpath__ + "LibreLogo_" + lng + 
".properties", 'rb').readlines() if b"=" in i]) return __lng__[lng] except Exception: __trace__() return None # dot for dotted line (implemented as an array of dot-headed arrows, because PostScript dot isn't supported by Writer) def __gendots__(n): return [__Point__(round(sin(360.0/n * i * pi/180.0) * 600), round(cos(360.0/n * i * pi/180) * 600)) for i in range(n)] __bezierdot__ = __Bezier__() __bezierdot__.Coordinates = (tuple(__gendots__(32)),) __bezierdot__.Flags = ((0,) * 32,) # turtle shape __TURTLESHAPE__ = [tuple([(__Point__(-120, 130), __Point__(-245, 347), __Point__(-291, 176), ), (__Point__(0, -500), __Point__(126, -375), __Point__(0, -250), __Point__(-124, -375), ), (__Point__(295, 170), __Point__(124, 124), __Point__(250, 340), ), (__Point__(466, -204), __Point__(224, -269), __Point__(71, -180), __Point__(313, -116), ), (__Point__(-75, -175), __Point__(-292, -300), __Point__(-417, -83), ), (__Point__(250, 0), __Point__(0, -250), __Point__(-250, 0), __Point__(0, 250), )] + [(i,) for i in __gendots__(32)] + # single points for wider selection [(__Point__(0, 0),)]), # last point for position handling ((__Point__(0, 0),),)] # hidden turtle (single point to draw at the left border of the page area) def __getdocument__(): global __docs__, _ doc = XSCRIPTCONTEXT.getDocument() try: _ = __docs__[doc.RuntimeUID] except: _ = __Doc__(doc) __docs__[doc.RuntimeUID] = _ # input function, result: input string or 0 def Input(s): global __halt__ try: ctx = uno.getComponentContext() smgr = ctx.ServiceManager text = "" # dialog d = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialogModel", ctx) ps = _.doc.CurrentController.Frame.ContainerWindow.getPosSize() lo = _.doc.CurrentController.Frame.ContainerWindow.convertSizeToLogic(__Size__(ps.Width, ps.Height), __APPFONT__) d.PositionX, d.PositionY, d.Width, d.Height = lo.Width/2 - 75, lo.Height/2 - 25, 150, 50 # label l = d.createInstance("com.sun.star.awt.UnoControlFixedTextModel" ) if type(s) == list: text = s[1] s = s[0] l.PositionX, l.PositionY, l.Width, l.Height, l.Name, l.TabIndex, l.Label = 5, 4, 140, 14, "l1", 2, s # textbox or combobox e = d.createInstance("com.sun.star.awt.UnoControlEditModel") e.PositionX, e.PositionY, e.Width, e.Height, e.Name, e.TabIndex = 5, 14, 140, 12, "e1", 0 # buttons b = d.createInstance( "com.sun.star.awt.UnoControlButtonModel" ) b.PositionX, b.PositionY, b.Width, b.Height, b.Name, b.TabIndex, b.PushButtonType, b.DefaultButton = 55, 32, 45, 14, "b1", 1, __Button_OK__, True b2 = d.createInstance( "com.sun.star.awt.UnoControlButtonModel" ) b2.PositionX, b2.PositionY, b2.Width, b2.Height, b2.Name, b2.TabIndex, b2.PushButtonType = 100, 32, 45, 14, "b2", 1, __Button_CANCEL__ # insert the control models into the dialog model d.insertByName( "l1", l) d.insertByName( "b1", b) d.insertByName( "b2", b2) d.insertByName( "e1", e) # create the dialog control and set the model controlContainer = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialog", ctx) controlContainer.setModel(d) # create a peer toolkit = smgr.createInstanceWithContext("com.sun.star.awt.ExtToolkit", ctx) controlContainer.setVisible(False) controlContainer.createPeer(toolkit, None) # execute it inputtext = controlContainer.execute() if inputtext: inputtext = e.Text else: __halt__ = True # dispose the dialog controlContainer.dispose() return inputtext except Exception: __trace__() def __string__(s, decimal = None): # convert decimal sign, localized BOOL and SET if not decimal: decimal = _.decimal if decimal == ',' and type(s) == 
float: return str(s).replace(".", ",") if type(s) in [list, tuple, dict, set]: __strings__ = [] s = re.sub("(?u)(['\"])(([^'\"]|\\['\"])*)(?<!\\\\)\\1", __encodestring__, str(s)) # XXX fix double '\'\"' if decimal == ',': s = s.replace(".", ",") return re.sub(__DECODE_STRING_REGEX__, __decodestring__, \ s.replace('set', __locname__('SET')).replace('True', __locname__('TRUE')).replace('False', __locname__('FALSE'))) if type(s) in [str, unicode]: return s elif type(s) == bool: return __locname__(str(s).upper()) return str(s) def Print(s): global __halt__ s = __string__(s, _.decimal) if not MessageBox(_.doc.CurrentController.Frame.ContainerWindow, s[:500] + s[500:5000].replace('\n', ' '), "", "messbox", __OK_CANCEL__): __halt__ = True def MessageBox(parent, message, title, msgtype = "messbox", buttons = __OK__): msgtypes = ("messbox", "infobox", "errorbox", "warningbox", "querybox") if not (msgtype in msgtypes): msgtype = "messbox" d = __WinDesc__() d.Type = __MODALTOP__ d.WindowServiceName = msgtype d.ParentIndex = -1 d.Parent = parent d.WindowAttributes = buttons tk = parent.getToolkit() msgbox = tk.createWindow(d) msgbox.MessageText = message if title: msgbox.CaptionText = title return msgbox.execute() def Random(r): try: return r * random.random() except: return list(r)[int(random.random() * len(r))] def to_ascii(s): return s.encode("unicode-escape").decode("utf-8").replace("\\u", "__u__").replace(r"\x", "__x__") def to_unicode(s): return bytes(s.replace("__x__", r"\x").replace("__u__", "\\u"), "ascii").decode("unicode-escape") def __trace__(): if 'PYUNO_LOGLEVEL' in os.environ: print(traceback.format_exc()) def __locname__(name, l = -1): if l == -1: l = _.lng for i in __l12n__(l): if i == name.upper(): return __l12n__(l)[i].split("|")[0] # return with the first localized name return to_unicode(name) def __getcursor__(fulltext): realselection = False try: text = _.doc.getCurrentController().getViewCursor().getText().createTextCursor() # copy selection (also in frames) text.gotoRange(_.doc.getCurrentController().getViewCursor(), False) if fulltext: 1/len(text.getString()) # exception, if zero length realselection = True except: text = _.doc.getText().createTextCursorByRange(_.doc.getText().getStart()) text.gotoEnd(True) return text, realselection def __translate__(arg = None): global _ __getdocument__() selection = __getcursor__(True)[0] __initialize__() __setlang__() # detect language text = selection.getString() # remove comments and strings text = re.sub(r"[ ]*;[^\n]*", "", re.sub(r"['„“‘«»「][^\n'”“‘’«»」]*['”“‘’«»」]", "", re.sub(r"^[ \t]*[;#][^\n]*", "", text))) text = " ".join(set(re.findall("(?u)\w+", text)) - set(re.findall("(?u)\w*\d+\w*", text))).lower() # only words ctx = uno.getComponentContext() guess = ctx.ServiceManager.createInstanceWithContext("com.sun.star.linguistic2.LanguageGuessing", ctx) guess.disableLanguages(guess.getEnabledLanguages()) guess.enableLanguages(tuple([Locale(i, "", "") for i in __translang__.split("|")])) guess = guess.guessPrimaryLanguage(text, 0, len(text)) try: l = {'cs': 'cs_CZ', 'el': 'el_GR', 'en': 'en_US', 'pt': 'pt_BR'}[guess.Language] except: l = guess.Language + '_' + guess.Language.upper() lang = __l12n__(l) if not lang: lang = __l12n__(guess.Language) if not lang: lang = __l12n__(_.lng) if not lang: lang = __l12n__("en_US") lq = '\'' + lang['LEFTSTRING'].replace("|", "") rq = '\'' + lang['RIGHTSTRING'].replace("|", "") __strings__ = [] text = re.sub(r"^(([ \t]*[;#][^\n]*))", __encodecomment__, text) text = 
re.sub("(?u)([%s])([^\n%s]*)(?<!\\\\)[%s]" % (lq, rq, rq), __encodestring__, selection.getString()) text = re.sub('(?u)(?<![0-9])(")(~?\w*)', __encodestring__, text) text = re.sub(r";(([^\n]*))", __encodecomment__, text) # translate the program to the language of the document FIXME space/tab exception = ['DECIMAL'] in1 = lang['IN'].upper() in2 = __l12n__(_.lng)['IN'].split("|")[0].upper() if in1[0] == '-' and in2[0] != '-': # "for x y-in" -> "for x in y" exception += ['IN'] text = re.sub(r"(?ui)\b((?:%s) +:?\w+) +([^\n]+)(?:%s) +(?=[[] |[[]\n)" % (lang['FOR'], in1), "\\1 %s \\2 " % in2, text) text = re.sub(r"(?ui)(:?\b\w+|[[][^[\n]*])\b(?:%s)\b" % in1, "%s \\1" % in2, text) elif in1[0] != '-' and in2[0] == '-': # "for x in y" -> "for x y-in" exception += ['IN'] text = re.sub(r"(?ui)(?<=\n)((?:%s)\b +:?\w+) +(?:%s) +([^\n]+?) +(?=[[] |[[]\n)" % (lang['FOR'], in1), "\\1 \\2%s " % in2, text) text = re.sub(r"(?ui)(?<!:)\b(?:%s) +(:?\b\w+|[[][^[\n]*])\b" % in1, "\\1%s" % in2, text) for i in set(lang) - set(exception): text = re.sub(r'(?ui)(?<!:)\b(%s)\b' % lang[i], __l12n__(_.lng)[i].split("|")[0].upper(), text) text = re.sub(r"(?<=\d)[%s](?=\d)" % lang['DECIMAL'], __l12n__(_.lng)['DECIMAL'], text) # decode strings and comments quoted = u"(?ui)(?<=%s)(%%s)(?=%s)" % (__l12n__(_.lng)['LEFTSTRING'][0], __l12n__(_.lng)['RIGHTSTRING'][0]) text = re.sub(__DECODE_STRING_REGEX__, __decodestring2__, text) for i in __STRCONST__: text = re.sub(quoted % lang[i], __l12n__(_.lng)[i].split("|")[0].upper(), text) text = re.sub(__DECODE_COMMENT_REGEX__, __decodecomment__, text) if _.doc.getText().compareRegionStarts(selection.getStart(), _.doc.getText().getStart()) == 0: pagebreak = True selection.setString("\n" + text.lstrip("\n")) else: pagebreak = False selection.setString(text) # convert to paragraphs __dispatcher__(".uno:ExecuteSearch", (__getprop__("SearchItem.SearchString", r"\n"), __getprop__("SearchItem.ReplaceString", r"\n"), \ __getprop__("Quiet", True), __getprop__("SearchItem.Command", 3), __getprop__("SearchItem.StyleFamily", 2), \ __getprop__("SearchItem.AlgorithmType", 1), __getprop__("SearchItem.RowDirection", 1), __getprop__("SearchItem.SearchFlags", 65536))) # set 2-page layout if pagebreak: selection.getStart().BreakType = 4 __dispatcher__(".uno:ZoomPage") class LogoProgram(threading.Thread): def __init__(self, code): self.code = code threading.Thread.__init__(self) def run(self): global __thread__ try: exec(self.code) if _.origcursor[0] and _.origcursor[1]: __dispatcher__(".uno:Escape") try: _.doc.CurrentController.getViewCursor().gotoRange(_.origcursor[0], False) except: _.doc.CurrentController.getViewCursor().gotoRange(_.origcursor[0].getStart(), False) except Exception as e: try: TRACEPATTERN = '"<string>", line ' message = traceback.format_exc() l = re.findall(TRACEPATTERN + '[0-9]+', message) if len(l) > 0 and not "SystemExit" in message: line = len(re.findall(__LINEBREAK__, ''.join(self.code.split("\n")[:int(l[-1][len(TRACEPATTERN):])]))) + 1 caption = __l12n__(_.lng)['LIBRELOGO'] if __prevcode__ and "\n" in __prevcode__: __gotoline__(line) caption = __l12n__(_.lng)['ERROR'] % line parent = _.doc.CurrentController.Frame.ContainerWindow if "maximum recursion" in message: MessageBox(parent, __l12n__(_.lng)['ERR_STOP'] + " " + __l12n__(_.lng)['ERR_MAXRECURSION'] % sys.getrecursionlimit(), __l12n__(_.lng)['LIBRELOGO']) elif "cannot initialize memory" in message or "Couldn't instantiate" in message: MessageBox(parent, __l12n__(_.lng)['ERR_STOP'] + " " + __l12n__(_.lng)['ERR_MEMORY'], 
__l12n__(_.lng)['LIBRELOGO']) elif "ZeroDivisionError" in message: MessageBox(parent, __l12n__(_.lng)['ERR_ZERODIVISION'], caption, "errorbox") elif "IndexError" in message: MessageBox(parent, __l12n__(_.lng)['ERR_INDEX'], caption, "errorbox") elif "KeyError" in message: MessageBox(parent, __l12n__(_.lng)['ERR_KEY'] % eval(re.search("KeyError: ([^\n]*)", message).group(1)), caption, "errorbox") elif "NameError" in message: if "__repeat__" in message: MessageBox(parent, __l12n__(_.lng)['ERR_ARGUMENTS'] % (__locname__('REPEAT'), 1, 0), caption, "errorbox") else: MessageBox(parent, __l12n__(_.lng)['ERR_NAME'] % \ to_unicode(re.search("(?<=name ')[\w_]*(?=')", message).group(0)), caption, "errorbox") elif "TypeError" in message and "argument" in message and "given" in message: r = re.search("([\w_]*)[(][)][^\n]* (\w+) arguments? [(](\d+)", message) # XXX later: handle 'no arguments' + plural MessageBox(parent, __l12n__(_.lng)['ERR_ARGUMENTS'] % (__locname__(r.group(1)), r.group(2), r.group(3)), caption, "errorbox") else: origline = __compiled__.split("\n")[line-1] if not "com.sun.star" in message and not "__repeat__" in message and not "*)" in message and ("[" in origline or "]" in origline): MessageBox(parent, __l12n__(_.lng)['ERR_BLOCK'], caption, "errorbox") else: MessageBox(parent, __l12n__(_.lng)['ERROR'] %line, __l12n__(_.lng)['LIBRELOGO'], "errorbox") __trace__() except: pass with __lock__: __thread__ = None def __encodestring__(m): __strings__.append(re.sub("\\[^\\]", "", m.group(2))) return __ENCODED_STRING__ % (len(__strings__) - 1) def __encodecomment__(m): __strings__.append(re.sub("\\[^\\]", "", m.group(2))) return __ENCODED_COMMENT__ % (len(__strings__) - 1) def __decodestring__(m): return "u'%s'" % __strings__[int(m.group(1))] def __decodestring2__(m): return __l12n__(_.lng)['LEFTSTRING'][0] + __strings__[int(m.group(1))] + __l12n__(_.lng)['RIGHTSTRING'][0] def __decodecomment__(m): return ";" + __strings__[int(m.group(1))] def __initialize__(): global __halt__, __thread__ __getdocument__() _.zoomvalue = _.doc.CurrentController.getViewSettings().ZoomValue shape = __getshape__(__TURTLE__) if not shape: shape = _.doc.createInstance( "com.sun.star.drawing.PolyPolygonShape" ) shape.AnchorType = __AT_PAGE__ shape.TextWrap = __THROUGHT__ shape.Opaque = True _.drawpage.add(shape) shape.PolyPolygon = __TURTLESHAPE__[0] _.shapecache[__TURTLE__] = shape shape.Name = __TURTLE__ _.initialize() turtlehome() _.doc.CurrentController.select(shape) shape.FillColor, transparence = __splitcolor__(_.areacolor, shape) shape.LineColor, shape.LineTransparence = __splitcolor__(_.pencolor) elif shape.Visible: if shape.FillStyle == __FillStyle_NONE__: _.areacolor = 0xffffffff else: _.areacolor = shape.FillColor + (int(255.0 * shape.FillTransparence/100) << 24) if shape.LineWidth != round((1 + _.pen * 2) * __PT_TO_TWIP__ / __MM10_TO_TWIP__) and shape.LineWidth != round(__LINEWIDTH__ / __MM10_TO_TWIP__): _.pensize = shape.LineWidth * __MM10_TO_TWIP__ if shape.LineStyle == __LineStyle_NONE__: # - none - __pen__(0) else: if shape.LineStyle == __LineStyle_SOLID__: __pen__(1) _.pencolor = shape.LineColor + (int(255.0 * shape.LineTransparence/100) << 24) shape.LineJoint = __ROUNDED__ shape.Shadow = True shape.FillColor, transparence = __splitcolor__(_.areacolor, shape) shape.FillTransparence = min(95, transparence) shape.ShadowColor, shape.ShadowTransparence, shape.ShadowXDistance, shape.ShadowYDistance = (0, 20, 0, 0) shape.LineWidth = min(_.pensize, (1 + _.pen * 2) * __PT_TO_TWIP__) / __MM10_TO_TWIP__ 
shape.SizeProtect = True def pagesize(n = -1): if n == -1: ps = _.doc.CurrentController.getViewCursor().PageStyleName page = _.doc.StyleFamilies.getByName("PageStyles").getByName(ps) return [page.Width * __MM10_TO_TWIP__ / __PT_TO_TWIP__, page.Height * __MM10_TO_TWIP__ / __PT_TO_TWIP__] return None def turtlehome(): turtle = __getshape__(__TURTLE__) if turtle: ps = _.doc.CurrentController.getViewCursor().PageStyleName page = _.doc.StyleFamilies.getByName("PageStyles").getByName(ps) turtle.setPosition(__Point__((page.Width - turtle.BoundRect.Width)/2, (page.Height - turtle.BoundRect.Height)/2)) turtle.LineStyle = __LineStyle_SOLID__ turtle.LineJoint = __MITER__ turtle.LineWidth = min(_.pensize, (1 + _.pen * 2) * __PT_TO_TWIP__) / __MM10_TO_TWIP__ turtle.LineColor, none = __splitcolor__(_.pencolor) turtle.LineTransparence = 25 turtle.RotateAngle = 0 turtle.ZOrder = 1000 def __pen__(n): _.pen = n turtle = __getshape__(__TURTLE__) if turtle: if n: turtle.LineStyle = __LineStyle_SOLID__ turtle.LineWidth = min(_.pensize, 3 * __PT_TO_TWIP__) / __MM10_TO_TWIP__ else: turtle.LineStyle = __LineStyle_DASHED__ turtle.LineDash = __LineDash__(__DashStyle_RECT__, 0, 0, 1, __PT_TO_TWIP__, __PT_TO_TWIP__) turtle.LineWidth = min(_.pensize, __PT_TO_TWIP__) / __MM10_TO_TWIP__ def __visible__(shape, visible = -1): # for OOo 3.2 compatibility try: if visible == -1: return shape.Visible shape.Visible = visible except: return True def hideturtle(): turtle = __getshape__(__TURTLE__) if turtle and turtle.Visible: z = turtle.getPosition() z = __Point__(z.X + turtle.BoundRect.Width / 2.0, z.Y + turtle.BoundRect.Height / 2.0) turtle.PolyPolygon = __TURTLESHAPE__[1] __visible__(turtle, False) turtle.LineTransparence, turtle.FillTransparence = 100, 100 # for saved files turtle.setPosition(z) __dispatcher__(".uno:Escape") def showturtle(): turtle = __getshape__(__TURTLE__) if turtle and not turtle.Visible: if not turtle.Parent: _.drawpage.add(turtle) z = turtle.getPosition() r, turtle.RotateAngle = turtle.RotateAngle, 0 turtle.PolyPolygon, turtle.RotateAngle = __TURTLESHAPE__[0], r z = __Point__(z.X - turtle.BoundRect.Width / 2.0, z.Y - turtle.BoundRect.Height / 2.0) turtle.setPosition(z) __visible__(turtle, True) pencolor(_.pencolor) fillcolor(_.areacolor) pensize(_.pensize/__PT_TO_TWIP__) _.doc.CurrentController.select(__getshape__(__TURTLE__)) elif not turtle: __initialize__() def left(arg=None): if __thread__: return None __initialize__() turtle = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') turtle.add(__getshape__(__TURTLE__)) _.doc.CurrentController.select(turtle) rotate(__TURTLE__, 1500) return None def right(arg=None): if __thread__: return None __initialize__() turtle = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') turtle.add(__getshape__(__TURTLE__)) _.doc.CurrentController.select(turtle) rotate(__TURTLE__, -1500) return None def goforward(arg=None): if __thread__: return None __initialize__() turtle = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') turtle.add(__getshape__(__TURTLE__)) _.doc.CurrentController.select(turtle) forward(10) return None def gobackward(arg=None): if __thread__: return None __initialize__() turtle = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') turtle.add(__getshape__(__TURTLE__)) _.doc.CurrentController.select(turtle) backward(10) return None def commandline(arg=None, arg2=None): run(arg, 
arg2) def __setlang__(): global _ c = _.doc.CurrentController.getViewCursor() locs = [i for i in [c.CharLocale, c.CharLocaleAsian, c.CharLocaleComplex] if i.Language != 'zxx'] # not None language # FIXME-BCP47: this needs adaption to language tags, a simple split on # '-' and assuming second field would be country would already fail if # a script tag was present. loc = Locale(__uilocale__.split('-')[0], __uilocale__.split('-')[1], '') if locs and loc not in locs: loc = locs[0] _.lng = loc.Language + '_' + loc.Country if not __l12n__(_.lng): _.lng = loc.Language if not __l12n__(_.lng): _.lng = "en_US" def run(arg=None, arg2 = -1): global _, __thread__, __halt__, _, __prevcode__, __prevlang__, __prevcompiledcode__ if __thread__: return None with __lock__: __thread__ = 1 try: __getdocument__() _.origcursor = [None, None] if arg2 == -1: _.origcursor, _.cursor = __getcursor__(False), __getcursor__(True)[0] __dispatcher__(".uno:Escape") c = _.doc.Text.createTextCursor() # go to the first page c.gotoStart(False) _.doc.CurrentController.getViewCursor().gotoRange(c, False) __initialize__() __setlang__() arg2 = _.cursor.getString() if len(arg2) > 20000: if MessageBox(_.doc.CurrentController.Frame.ContainerWindow, __l12n__(_.lng)['ERR_NOTAPROGRAM'], __l12n__(_.lng)['LIBRELOGO'], "querybox", __YES_NO_CANCEL__) != 2: with __lock__: __thread__ = None return None elif len(arg2) == 0 and _.origcursor[1]: _.origcursor[0].setString("fontcolor 'green'\nlabel 'LIBRE'\npu\nback 30\npic [\n\tfc any\n\tcircle 40\n\tfontcolor 'black'\n\tlabel 'LOGO'\n\tleft 180\n\tfd 20\n\tpd\n\tpc any\n\tps 1\n\tfd 40\n\trepeat 20 [\n\t\tfd repcount*2\n\t\trt 90\n\t]\n]\npu pos any pd") __translate__() _.origcursor, _.cursor = __getcursor__(False), __getcursor__(True)[0] arg2 = _.cursor.getString() else: __initialize__() __setlang__() if __prevcode__ and __prevcode__ == arg2 and __prevlang__ == _.lng: __thread__ = LogoProgram(__prevcompiledcode__) else: __prevcode__ = arg2 __prevlang__ = _.lng __prevcompiledcode__ = __compil__(arg2) __thread__ = LogoProgram(__prevcompiledcode__) __halt__ = False turtle = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') turtle.add(__getshape__(__TURTLE__)) _.doc.CurrentController.select(turtle) # set working directory for file operations if _.doc.hasLocation(): name = os.chdir(unohelper.fileUrlToSystemPath(re.sub("[^/]*$", "", _.doc.getURL()))) else: name = os.chdir(os.path.expanduser('~')) __thread__.start() except Exception as e: __thread__ = None __trace__() return None def stop(arg=None): global __halt__ with __lock__: __halt__ = True return None def home(arg=None): if __thread__: return None __getdocument__() turtle = __getshape__(__TURTLE__) if turtle: __removeshape__(__TURTLE__) _.drawpage.remove(turtle) __initialize__() __dispatcher__(".uno:Escape") if not __halt__: return None _.pencolor = 0 _.pensize = __LINEWIDTH__ _.areacolor = __FILLCOLOR__ pen = 1 __removeshape__(__ACTUAL__) def clearscreen(arg=None): if __thread__: return None __getdocument__() turtle = __getshape__(__TURTLE__) if not turtle: __initialize__() if not __halt__: # avoid unintentional image deletion in large documents if len(__getcursor__(True)[0].getString()) < 5000: __cs__(False) return __cs__(False) __dispatcher__(".uno:Escape") def __checkhalt__(): global __thread__, __halt__ if __halt__: with __lock__: __thread__ = None sys.exit() def __cs__(select = True): turtle = __getshape__(__TURTLE__) visible = False if turtle and turtle.Visible: __visible__(turtle, False) 
visible = True if _.doc.CurrentController.select(_.drawpage) and \ _.doc.CurrentController.getSelection().ImplementationName == "com.sun.star.drawing.SvxShapeCollection": __dispatcher__(".uno:Delete") if turtle and visible: __visible__(turtle, True) if select: _.doc.CurrentController.select(_.drawpage) def __dispatcher__(s, properties = (), doc = 0): ctx = XSCRIPTCONTEXT.getComponentContext() d = ctx.ServiceManager.createInstanceWithContext("com.sun.star.frame.DispatchHelper", ctx) if doc != 0: d.executeDispatch(doc.CurrentController.Frame, s, "", 0, properties) else: d.executeDispatch(_.doc.CurrentController.Frame, s, "", 0, properties) def __getshape__(shapename): try: if _.shapecache[shapename].Parent: return _.shapecache[shapename] _.shapecache.pop(shapename) except: pass return None def __angle__(deg): if deg == u'any': return random.random() * 36000 return deg * 100 def turnleft(deg): rotate(__TURTLE__, __angle__(deg)) def turnright(deg): rotate(__TURTLE__, -__angle__(deg)) def heading(deg = -1, go = False): turtle = __getshape__(__TURTLE__) if deg == -1: return -turtle.RotateAngle / 100 + 360 else: if deg == u'any': turtle.RotateAngle = random.random() * 36000 elif type(deg) == list: pos = turtle.getPosition() px, py = pos.X + turtle.BoundRect.Width / 2.0, pos.Y + turtle.BoundRect.Height / 2.0 dx = px * __MM10_TO_TWIP__ - deg[0] * __PT_TO_TWIP__ dy = deg[1] * __PT_TO_TWIP__ - py * __MM10_TO_TWIP__ n = sqrt(dx**2 + dy**2) if dy > 0 and n > 0: turtle.RotateAngle = a = -(180 + asin(dx / n) / (pi/180)) * 100 + 72000 # +720 for max(angle, preciseAngle) of __go__() elif n > 0: turtle.RotateAngle = a = asin(dx / n) / (pi/180) * 100 + 72000 if go and n > 0: __go__(__TURTLE__, -n, False, a) else: turtle.RotateAngle = -deg * 100 def rotate(shapename, deg): shape = __getshape__(shapename) if shape: shape.RotateAngle = shape.RotateAngle + deg def forward(n): if type(n) == list: pos = position() angle = heading() dx = n[1] * sin((pi/180) * angle) + n[0] * sin((pi/180)*(angle + 90)) dy = n[1] * cos((pi/180) * angle) + n[0] * cos((pi/180)*(angle + 90)) position([pos[0] + dx, pos[1] - dy]) elif type(n) == str: siz = label([1, 1, n]) shape = __getshape__(__ACTUAL__) pos = position() angle = heading() w, h = siz.Width / (__PT_TO_TWIP__ / __MM10_TO_TWIP__), siz.Height / (__PT_TO_TWIP__ / __MM10_TO_TWIP__) dx = 0 * sin((pi/180) * (angle)) + w * sin((pi/180)*(angle + 90)) dy = 0 * cos((pi/180) * (angle)) + w * cos((pi/180)*(angle + 90)) position([pos[0] + dx, pos[1] - dy]) heading(angle) else: __go__(__TURTLE__, -n * __PT_TO_TWIP__) def backward(n): if type(n) == list: forward([-n[0], -n[1]]) turnright(180) else: __go__(__TURTLE__, n * __PT_TO_TWIP__) def __dots__(n, pos, dx, dy, r = -1, q = 0): # dots for dotted polyline or circle f = [1, 4, 4, 4, 4][q] k = abs(int(1.0 * n / max(20, _.pensize) / 2.0 / f)) dots = [] px, py = pos.X, pos.Y for i in range(k + 1): if k > 0: if r != -1: px, py = pos.X + sin(((f-1)*(q-1)*30 + 360.0/f/k * i) * pi/180.0) * r[0], pos.Y + cos(((f-1)*(q-1)*30 + 360.0/f/k * i) * pi/180) * r[1] else: px, py = pos.X + round(i * dx/k), pos.Y + round(i * dy/k) dots += [(__Point__(px, py), __Point__(px + 7, py + 7))] return dots def __draw__(d, count = True): shape = _.doc.createInstance( "com.sun.star.drawing." 
+ d) shape.AnchorType = __AT_PAGE__ shape.TextWrap = __THROUGHT__ __visible__(shape, False) while __zoom__(): # temporary fix program halt with continuous zoom while __zoom__(): __time__.sleep(0.2) __time__.sleep(0.2) _.drawpage.add(shape) if __group__: __group__.add(shape) if count: _.shapecache[next(_.shapecount)] = str(_.time) return shape def __zoom__(): z = _.doc.CurrentController.getViewSettings().ZoomValue if z != _.zoomvalue: _.zoomvalue = z return True return False def __lefthang__(shape): global __grouplefthang__ if __group__: p = shape.getPosition() if p.X < __grouplefthang__: __grouplefthang__ = p.X def __go__(shapename, n, dot = False, preciseAngle = -1): turtle = __getshape__(shapename) turtlepos = None if shapename == __TURTLE__: try: turtlepos = turtle.PolyPolygon[-1][-1] except: pass pos = turtle.getPosition() dx = n * sin((pi/180)*(max(turtle.RotateAngle, preciseAngle)/100)) dy = n * cos((pi/180)*(max(turtle.RotateAngle, preciseAngle)/100)) turtle.setPosition(__Point__(pos.X + dx / __MM10_TO_TWIP__, pos.Y + dy / __MM10_TO_TWIP__)) if (_.pencolor != _.oldlc or _.pensize != _.oldlw or _.linestyle != _.oldls or _.linejoint != _.oldlj or _.linecap != _.oldlcap): __removeshape__(__ACTUAL__) shape = None else: shape = __getshape__(__ACTUAL__) _.oldlw = _.pensize _.oldlc = _.pencolor _.oldls = _.linestyle _.oldlj = _.linejoint _.oldlcap = _.linecap if shape and not _.pen and not dot: _.continuous = False return c, c2 = __Point__(pos.X + turtle.BoundRect.Width / 2.0, pos.Y + turtle.BoundRect.Height / 2.0), __Point__(round(dx), round(dy)) if shape and "LineShape" in shape.ShapeType: if _.continuous or dot: last = shape.PolyPolygon[-1][-1] if not (turtlepos and (abs(last.X - turtlepos.X) > 100 or abs(last.Y - turtlepos.Y) > 100) and (not __group__ or (shape.getPosition().X > 0 and turtle.getPosition().X > 0))): # picture [ ] keeps hanging shapes if dot or _.linestyle == __LineStyle_DOTTED__: shape.PolyPolygon = tuple( list(shape.PolyPolygon) + __dots__(n, turtlepos, dx, dy)) else: last.X = last.X + c2.X last.Y = last.Y + c2.Y shape.PolyPolygon = tuple( list(shape.PolyPolygon[:-1]) + [tuple( list(shape.PolyPolygon[-1]) + [last])]) __lefthang__(shape) return elif turtlepos: shape.PolyPolygon = tuple( list(shape.PolyPolygon) + [(turtlepos, __Point__(turtlepos.X + c2.X, turtlepos.Y + c2.Y))]) _.continuous = True __lefthang__(shape) return if not _.pen and not dot: return shape = __draw__("PolyLineShape") shape.RotateAngle = 0 shape.PolyPolygon = tuple([tuple([__Point__(0, 0)])]) shape.setPosition(c) last = shape.PolyPolygon[-1][-1] last2 = __Point__(last.X + c2.X, last.Y + c2.Y) shape.LineStyle, shape.LineDash = __linestyle__(_.linestyle) shape.LineJoint = _.linejoint shape.LineCap = _.linecap if dot or _.linestyle == __LineStyle_DOTTED__: shape.PolyPolygon = tuple( list(shape.PolyPolygon) + __dots__(n, last, c2.X, c2.Y)) shape.LineStart = __bezierdot__ shape.LineStartCenter = True shape.LineStartWidth = max(20, _.pensize) / __MM10_TO_TWIP__ shape.LineWidth = 0 else: shape.PolyPolygon = tuple([tuple( list(shape.PolyPolygon[-1]) + [last2])]) shape.LineWidth = _.pensize / __MM10_TO_TWIP__ shape.LineColor, shape.LineTransparence = __splitcolor__(_.pencolor) if shape.LineTransparence == 100: shape.LineStyle = 0 __visible__(shape, True) shape.Name = __ACTUAL__ _.shapecache[__ACTUAL__] = shape _.oldlw = _.pensize _.oldlc = _.pencolor _.oldls = _.linestyle _.oldlj = _.linejoint _.oldlcap = _.linecap _.continuous = True __lefthang__(shape) def __fillit__(filled = True): oldshape = 
__getshape__(__ACTUAL__) if oldshape and oldshape.LineStartCenter: __removeshape__(__ACTUAL__) # FIXME close dotted polyline return if oldshape and "LineShape" in oldshape.ShapeType: shape = __draw__("PolyPolygonShape", False) shape.PolyPolygon = oldshape.PolyPolygon shape.setPosition(oldshape.getPosition()) shape.LineStyle, shape.LineDash = __linestyle__(_.linestyle) shape.LineJoint = _.linejoint shape.LineCap = _.linecap shape.LineWidth = _.pensize / __MM10_TO_TWIP__ shape.LineColor, shape.LineTransparence = __splitcolor__(_.pencolor) shape.FillColor, shape.FillTransparence = __splitcolor__(_.areacolor, shape) if _.hatch: shape.FillBackground = True if shape.FillTransparence != 100 else False shape.FillHatch = _.hatch shape.FillStyle = 3 elif type(_.areacolor) != tuple: shape.FillStyle = int(filled) if shape.LineTransparence == 100: shape.LineStyle = 0 if shape.FillTransparence == 100: shape.FillTransparence = 0 # for hatching and better modifications on UI if not _.hatch: shape.FillStyle = 0 shape.setString(oldshape.getString()) oldshape.Name = "" shape.Name = __ACTUAL__ _.shapecache[__ACTUAL__] = shape if __group__: __group__.remove(oldshape) __visible__(shape, True) _.drawpage.remove(oldshape) elif oldshape and "PolyPolygon" in oldshape.ShapeType: oldshape.LineStyle = int(_.pen) oldshape.LineJoint = _.linejoint oldshape.LineCap = _.linecap if _.hatch: oldshape.FillBackground = True oldshape.FillHatch = _.hatch oldshape.FillStyle = 3 else: oldshape.FillStyle = int(filled) oldshape.LineWidth = _.pensize / __MM10_TO_TWIP__ oldshape.LineColor, oldshape.LineTransparence = __splitcolor__(_.pencolor) oldshape.FillColor, oldshape.FillTransparence = __splitcolor__(_.areacolor, oldshape) def point(): oldpen, _.pen = _.pen, 1 oldstyle, _.linestyle = _.linestyle, __LineStyle_DOTTED__ __go__(__TURTLE__, 0, True) _.pen, _.linestyle = oldpen, oldstyle def __boxshape__(shapetype, l): turtle = __getshape__(__TURTLE__) shape = __draw__(shapetype + "Shape") pos = turtle.getPosition() pos.X = pos.X - (l[0] * __PT_TO_TWIP__ / __MM10_TO_TWIP__ / 2) + turtle.BoundRect.Width / 2.0 pos.Y = pos.Y - (l[1] * __PT_TO_TWIP__ / __MM10_TO_TWIP__ / 2) + turtle.BoundRect.Height / 2.0 shape.setPosition(pos) shape.setSize(__Size__(l[0] * __PT_TO_TWIP__ / __MM10_TO_TWIP__, l[1] * __PT_TO_TWIP__ / __MM10_TO_TWIP__)) shape.LineStyle, shape.LineDash = __linestyle__(_.linestyle) shape.LineWidth = _.pensize / __MM10_TO_TWIP__ shape.LineJoint = _.linejoint shape.LineCap = _.linecap shape.LineColor, shape.LineTransparence = __splitcolor__(_.pencolor) shape.FillColor, shape.FillTransparence = __splitcolor__(_.areacolor, shape, turtle.RotateAngle) if _.hatch: shape.FillBackground = True if shape.FillTransparence != 100 else False shape.FillHatch = _.hatch shape.FillStyle = 3 elif type(_.areacolor) != tuple: shape.FillStyle = 1 if shape.LineTransparence == 100: shape.LineStyle = 0 if shape.FillTransparence == 100: shape.FillTransparence = 0 # for hatching and better modifications on UI if not _.hatch: shape.FillStyle = 0 shape.RotateAngle = turtle.RotateAngle if shapetype == "Rectangle" and len(l) > 2: shape.CornerRadius = (l[2] * __PT_TO_TWIP__) / __MM10_TO_TWIP__ elif shapetype == "Ellipse" and len(l) > 2: try: shape.CircleKind = __SECTION__ shape.CircleStartAngle = (-l[3] - 270) * 100 shape.CircleEndAngle = (-l[2] - 270) * 100 shape.CircleKind = [__FULL__, __SECTION__, __CUT__, __ARC__][l[4]] except: pass __visible__(shape, True) __removeshape__(__ACTUAL__) _.shapecache[__ACTUAL__] = shape __lefthang__(shape) def ellipse(l): if 
type(l) != type([]): # default for circle and square l = [l, l] if _.linestyle == __LineStyle_DOTTED__: __groupstart__() _.linestyle = __LineStyle_SOLID__ pc, _.pencolor = _.pencolor, 0xff000000 ellipse(l) _.pencolor, _.linestyle = pc, __LineStyle_DOTTED__ point() shape = __getshape__(__ACTUAL__) shape.PolyPolygon = tuple(__dots__(max(l[0], l[1]) * pi * __PT_TO_TWIP__, shape.PolyPolygon[0][0], 0, 0, [i/2.0 * __PT_TO_TWIP__ for i in l])) turtle = __getshape__(__TURTLE__) shape.RotateAngle = turtle.RotateAngle __groupend__() else: __boxshape__("Ellipse", l) def rectangle(l): if type(l) != type([]): # default for circle and square l = [l, l] if _.linestyle == __LineStyle_DOTTED__: __groupstart__() _.linestyle = __LineStyle_SOLID__ pc, _.pencolor = _.pencolor, 0xff000000 rectangle(l) _.pencolor, _.linestyle = pc, __LineStyle_DOTTED__ point() shape = __getshape__(__ACTUAL__) if type(l) != type([]): l = [l, l] if len(l) == 2: l = l + [0] l = [i * __PT_TO_TWIP__ for i in l] c = shape.PolyPolygon[0][0] k = [min(l[0] / 2.0, l[2]), min(l[1] / 2.0, l[2])] p = __dots__(l[0] - 2 * k[0], __Point__(c.X - l[0]/2 + k[0], c.Y - l[1]/2), l[0] - 2 * k[0], 0) p = p[:-1] + __dots__(l[1] - 2 * k[1], __Point__(c.X + l[0]/2, c.Y - l[1]/2 + k[1]), 0, l[1] - 2 * k[1]) p = p[:-1] + __dots__(l[0] - 2 * k[0], __Point__(c.X + l[0]/2 - k[0], c.Y + l[1]/2), -l[0] + 2 * k[0], 0) p = p[:-1] + __dots__(l[1] - 2 * k[1], __Point__(c.X - l[0]/2, c.Y + l[1]/2 - k[1]), 0, -l[1] + 2 * k[1]) if l[2] > 0: p = p + __dots__(max(k) * 2 * pi, __Point__(c.X - l[0]/2 + k[0], c.Y - l[1]/2 + k[1]), 0, 0, k, 3)[1:] p = p + __dots__(max(k) * 2 * pi, __Point__(c.X + l[0]/2 - k[0], c.Y - l[1]/2 + k[1]), 0, 0, k, 2)[1:] p = p + __dots__(max(k) * 2 * pi, __Point__(c.X + l[0]/2 - k[0], c.Y + l[1]/2 - k[1]), 0, 0, k, 1)[1:] p = p + __dots__(max(k) * 2 * pi, __Point__(c.X - l[0]/2 + k[0], c.Y + l[1]/2 - k[1]), 0, 0, k, 4)[1:] shape.PolyPolygon = tuple(p) turtle = __getshape__(__TURTLE__) shape.RotateAngle = turtle.RotateAngle __groupend__() else: __boxshape__("Rectangle", l) def label(st): if type(st) != type([]): st = [0, 0, st] # get text size shape = _.doc.createInstance( "com.sun.star.drawing.TextShape") shape.TextAutoGrowWidth = True shape.Visible = False actual = __getshape__(__ACTUAL__) _.drawpage.add(shape) text(shape, st[2]) z = shape.getSize() # show text using RectangleShape (for correct SVG export) ac, pc = _.areacolor, _.pencolor _.areacolor, _.pencolor = 0xff000000, 0xff000000 # invisible rectangle([z.Width / (__PT_TO_TWIP__ / __MM10_TO_TWIP__), z.Height / (__PT_TO_TWIP__ / __MM10_TO_TWIP__)]) _.drawpage.remove(shape) _.pencolor, _.areacolor = pc, ac lab = __getshape__(__ACTUAL__) text(lab, st[2]) if st[0] != 0 or st[1] != 0: pos = position() angle = heading() n = [st[0] * z.Width/2, st[1] * z.Height/2] dx = n[1] * sin((pi/180) * angle) + n[0] * sin((pi/180)*(angle + 90)) dy = n[1] * cos((pi/180) * angle) + n[0] * cos((pi/180)*(angle + 90)) lab.setPosition(__Point__(round(pos[0] * __PT_TO_TWIP__ / __MM10_TO_TWIP__ + dx - lab.BoundRect.Width/2), round(pos[1] * __PT_TO_TWIP__ / __MM10_TO_TWIP__ - dy - lab.BoundRect.Height/2))) _.shapecache[__ACTUAL__] = actual return z def text(shape, st): if shape: shape.setString(__string__(st, _.decimal)) c = shape.createTextCursor() c.gotoStart(False) c.gotoEnd(True) c.CharColor, none = __splitcolor__(_.textcolor) c.CharHeight = _.fontheight c.CharWeight = __fontweight__(_.fontweight) c.CharPosture = __fontstyle__(_.fontstyle) c.CharFontName = _.fontfamily def sleep(t): _.time = _.time + t 
__removeshape__(__ACTUAL__) for i in range(int(t/__SLEEP_SLICE_IN_MILLISECONDS__)): __checkhalt__() __time__.sleep(0.5) __checkhalt__() __time__.sleep(t%__SLEEP_SLICE_IN_MILLISECONDS__/1000.0) def __removeshape__(shapename): try: _.shapecache.pop(shapename).Name = "" except: pass def __fontweight__(w): if type(w) == int: return w elif re.match(__l12n__(_.lng)['BOLD'], w, flags = re.I): return 150 elif re.match(__l12n__(_.lng)['NORMAL'], w, flags = re.I): return 100 return 100 def __fontstyle__(w): if type(w) == int: return w elif re.match(__l12n__(_.lng)['ITALIC'], w, flags = re.I): return __Slant_ITALIC__ elif re.match(__l12n__(_.lng)['UPRIGHT'], w, flags = re.I): return __Slant_NONE__ return __Slant_NONE__ def __color__(c): if type(c) in [int, float, long]: return c if type(c) == unicode: if c == u'any': rc, rv, rgray = __NORMCOLORS__[int(random.random()*7)], random.random(), random.random() ** 0.5 ratio = 1.0*abs(rc[2])/(abs(rc[2]) + abs(rc[4])) newcol = list(rc[0]) if rv < ratio: newcol[rc[1]] += rc[2] * rv/ratio else: newcol[rc[3]] += rc[4] * (rv - ratio)/(1 - ratio) # random grayness rdark = 1 - 2**4 * (random.random()-0.5)**4 for i in range(0, 3): newcol[i] = 255 * (rgray + (newcol[i]/255.0 - rgray) * rdark) return __color__(newcol) if c[0:1] == '~': c = __componentcolor__(__colors__[_.lng][c[1:].lower()]) for i in range(3): c[i] = max(min(c[i] + int(random.random() * 64) - 32, 255), 0) return __color__(c) return __colors__[_.lng][c.lower()] if type(c) == list: if len(c) == 1: # color index return __COLORS__[int(c[0])][1] elif len(c) == 3: # RGB return (int(c[0])%256 << 16) + (int(c[1])%256 << 8) + int(c[2])%256 elif len(c) == 2 or len(c) > 4: # gradient return (__color__(c[0]), __color__(c[1])) + tuple(c[2:]) return (int(c[3])%256 << 24) + (int(c[0])%256 << 16) + (int(c[1])%256 << 8) + int(c[2])%256 # RGB + alpha def __linestyle__(s): if _.pen == 0: return 0, __LineDash__() if _.linestyle == __LineStyle_DASHED__: return _.linestyle, __LineDash__(__DashStyle_RECT__, 0, 0, 1, 100, 100) elif _.linestyle == __LineStyle_DOTTED__: return __LineStyle_DASHED__, __LineDash__(__DashStyle_RECT__, 1, 1, 0, 0, 100000) elif type(s) == list: return __LineStyle_DASHED__, __LineDash__((s[5:6] or [0])[0], s[0], s[1] * __PT_TO_TWIP__, s[2], s[3] * __PT_TO_TWIP__, s[4] * __PT_TO_TWIP__) return s, __LineDash__() def fillstyle(s): if type(s) == list: color, null = __splitcolor__(__color__(s[1])) _.hatch = __Hatch__(s[0] - 1, color, s[2] * __PT_TO_TWIP__, s[3] * 10) elif s == 0: _.hatch = None elif s <= 10: # using hatching styles of Writer fillstyle([[1, 0, 5, 0], [1, 0, 5, 45], [1, 0, 5, -45], [1, 0, 5, 90], [2, [127, 0, 0], 5, 45], [2, [127, 0, 0], 5, 0], [2, [0, 0, 127], 5, 45], [2, [0, 0, 127], 5, 0], [3, [0, 0, 127], 5, 0], [1, 0, 25, 45]][s-1]) def __splitcolor__(c, shape = None, angle = None): if shape and (type(c) == tuple or type(_.t10y) == list): angle = heading() if angle == None else -angle / 100 + 360 if type(c) == tuple: shape.FillStyle = __FillStyle_GRADIENT__ # gradient color: [color1, color2, style, angle(must be positive for I/O), border, x_percent, y_percent, color1_intensity_percent, color2_intensity_percent] d, d[0:len(c)], c = [0, 0, __GradientStyle_LINEAR__, 0, 0, 0, 0, 100, 100], c, c[0] shape.FillGradient = __Gradient__(d[2], d[0], d[1], (-angle + d[3]) * 10 % 3600, d[4], d[5], d[6], d[7], d[8], 0) if type(_.t10y) == list: # transparency gradient: [begin_percent, end_percent, style, angle, border, x_percent, y_percent] table = 
_.doc.createInstance("com.sun.star.drawing.TransparencyGradientTable") if not table.hasByName(str(_.t10y) + str(angle)): t, t[0:len(_.t10y)] = [100, __GradientStyle_LINEAR__, 0, 0, 0, 0, 0], _.t10y table.insertByName(str(_.t10y) + str(angle), __Gradient__(t[2], t[0] * 0xffffff / 100.0, t[1] * 0xffffff / 100.0, (-angle + t[3]) * 10 % 3600, t[4], t[5], t[6], 100, 100, 0)) shape.FillTransparenceGradientName = str(_.t10y) + str(angle) c = 0 if type(c) == tuple else c & 0xffffff else: shape.FillStyle = __FillStyle_GRADIENT__ c = int(_.t10y * 255.0/100) << 24 """Split color constants to RGB (3-byte) + transparency (%)""" return int(c) & 0xffffff, (int(c) >> 24) / (255.0/100) def __componentcolor__(c): a = [ (c & 0xff0000) >> 16, (c & 0xff00) >> 8, c & 0xff ] if c > 2**24: a.append((c & 0xff000000) >> 24) return a def pencolor(n = -1): if n != -1: _.pencolor = __color__(n) turtle = __getshape__(__TURTLE__) if turtle and __visible__(turtle): turtle.LineColor, turtle.LineTransparence = __splitcolor__(_.pencolor) else: return __componentcolor__(_.pencolor) def pensize(n = -1): if n != -1: if n == 'any': _.pensize = random.random() * 10 * __PT_TO_TWIP__ else: _.pensize = n * __PT_TO_TWIP__ turtle = __getshape__(__TURTLE__) if turtle and __visible__(turtle): turtle.LineWidth = min(_.pensize, (1 + _.pen * 2) * __PT_TO_TWIP__) / __MM10_TO_TWIP__ return _.pensize / __PT_TO_TWIP__ def penstyle(n = -1): if n == -1: try: return __locname__(_.linestyle.value) except: return __locname__('DOTTED') if type(n) == list and len(n) >= 5: _.linestyle = n elif re.match(__l12n__(_.lng)['SOLID'], n, flags = re.I): _.linestyle = __LineStyle_SOLID__ elif re.match(__l12n__(_.lng)['DASH'], n, flags = re.I): _.linestyle = __LineStyle_DASHED__ elif re.match(__l12n__(_.lng)['DOTTED'], n, flags = re.I): _.linestyle = __LineStyle_DOTTED__ def penjoint(n = -1): if n == -1: return __locname__(_.linejoint.value) if re.match(__l12n__(_.lng)['NONE'], n, flags = re.I): _.linejoint = __Joint_NONE__ elif re.match(__l12n__(_.lng)['BEVEL'], n, flags = re.I): _.linejoint = __BEVEL__ elif re.match(__l12n__(_.lng)['MITER'], n, flags = re.I): _.linejoint = __MITER__ elif re.match(__l12n__(_.lng)['ROUNDED'], n, flags = re.I): _.linejoint = __ROUNDED__ def pencap(n = -1): if n == -1: return __locname__(_.linecap.value.replace('BUTT', 'NONE')) if re.match(__l12n__(_.lng)['NONE'], n, flags = re.I): _.linecap = __Cap_NONE__ elif re.match(__l12n__(_.lng)['ROUNDED'], n, flags = re.I): _.linecap = __Cap_ROUND__ elif re.match(__l12n__(_.lng)['SQUARE'], n, flags = re.I): _.linecap = __Cap_SQUARE__ def fillcolor(n = -1): if n != -1: _.areacolor = __color__(n) if type(_.areacolor) != tuple: _.t10y = (int(_.areacolor) >> 24) / (255.0/100) else: _.t10y = 0 turtle = __getshape__(__TURTLE__) if turtle and __visible__(turtle): turtle.FillColor, transparence = __splitcolor__(_.areacolor, turtle) turtle.FillTransparence = min(95, transparence) else: return __componentcolor__(_.areacolor) def filltransparency(n = -1): if n != -1: if n == u'any': n = 100 * random.random() if type(n) != list: if type(_.areacolor) != tuple: fillcolor((_.areacolor & 0xffffff) + (int(n * (255.0/100)) << 24)) else: _.t10y = n else: _.t10y = n else: return _.t10y def pentransparency(n = -1): if n != -1: if n == u'any': n = 100 * random.random() pencolor((_.pencolor & 0xffffff) + (int(n * (255.0/100)) << 24)) else: return _.pencolor >> 24 def fontcolor(n = -1): if n != -1: _.textcolor = __color__(n) else: return __componentcolor__(_.textcolor) def position(n = -1): turtle = 
__getshape__(__TURTLE__) if turtle: if n != -1: if n == 'any': ps = pagesize() heading([random.random() * ps[0], random.random() * ps[1]], True) else: heading(n, True) else: pos = turtle.getPosition() pos.X, pos.Y = pos.X + turtle.BoundRect.Width / 2.0, pos.Y + turtle.BoundRect.Height / 2.0 return [ pos.X * __MM10_TO_TWIP__ / __PT_TO_TWIP__, pos.Y * __MM10_TO_TWIP__ / __PT_TO_TWIP__ ] def __groupstart__(name = ""): global __group__, __grouplefthang__, __groupstack__ __removeshape__(__ACTUAL__) __groupstack__.append(__group__) if name != "": # store pic name (for correct repcount) __groupstack__.append(name) if ".SVG" == name[-4:].upper(): _.time = 0 _.shapecount = itertools.count() __groupstack__.append(__grouplefthang__) __group__ = uno.getComponentContext().ServiceManager.createInstance('com.sun.star.drawing.ShapeCollection') __grouplefthang__ = 0 def create_svg_animation(m): global _ id = int(m.group(1)) if id - 3 in _.shapecache: t = _.shapecache[id-3] opacity = "100" if t == "0" else "0" name = "" if id != 3 else "id=\"first\"" start = "%sms;last.end+%sms" % (t, t) if id == 3 else "first.end+%dms" % (int(t) - int(_.shapecache[0])) return '<g id="id%s" opacity="0"><animate %s attributeName="opacity" from="100" to="100" begin="%s" dur="1ms" fill="freeze"/><animate attributeName="opacity" from="100" to="%s" begin="last.end" dur="1ms" fill="freeze"/>' % (m.group(1), name, start, opacity) return m.group() def create_valid_svg_file(filename): with open(filename, "r") as f: s = f.read() s = re.sub('(?s)(<g\\sid="[^"]*)\(([^"]*)\)', '\\1\\2', s) # bad "(", ")" in xml:id s = re.sub('(?s)<g\\sooo:[^>]*>', '', s) # remove non standard attributes s = re.sub('(?s)<defs class="EmbeddedBulletChars">.*(?=<defs class="TextEmbeddedBitmaps")', '', s) # remove unused parts s = re.sub('(?s)(<path stroke-width="[^"]*"[^<]*)stroke-width="[^"]*"', '\\1', s) # double stroke-width s = re.sub('(?s)<svg\\s+version="1.2"', '<svg version="1.1"', s) # for W3C Validator if _.time > 0: s = re.sub('<g id="id([0-9]+)">', create_svg_animation, s) m = re.match('(?s)(.*<animate[^>]*first[.]end.([0-9]+)[^>]* dur=")1ms"', s) lasttime = _.time - int(m.group(2)) - int(_.shapecache[0]) + 1 if lasttime > 1: s = re.sub('(?s)(.*<animate[^>]*first[.]end.([0-9]+)[^>]* dur=")1ms"', m.group(1) + str(lasttime) + 'ms" id="last"', s) with open(filename, 'w') as f: f.write(s) def __groupend__(name = ""): global __group__, __grouplefthang__, __groupstack__, __halt__ g = 0 if __group__.getCount() > 1: if __grouplefthang__ < 0: for i in range(__group__.Count): s = __group__.getByIndex(i) p = s.getPosition() p.X = p.X + -__grouplefthang__ s.setPosition(p) g = _.drawpage.group(__group__) p = g.getPosition() p.X = p.X + __grouplefthang__ g.setPosition(p) else: g = _.drawpage.group(__group__) g.TextWrap = __THROUGHT__ elif __group__.getCount() == 1: g = __group__.getByIndex(0) __grouplefthang__ = min(__groupstack__.pop(), __grouplefthang__) if name != "": name = __groupstack__.pop() if name and ".SVG" == name[-4:].upper() and g: _.doc.CurrentController.select(g) __dispatcher__(".uno:Copy") ctx = XSCRIPTCONTEXT.getComponentContext() d = ctx.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", ctx) draw = d.loadComponentFromURL("private:factory/sdraw", "_blank", 0, ()) drawpage = draw.getDrawPages().getByIndex(0) while XSCRIPTCONTEXT.getDocument() != draw: if XSCRIPTCONTEXT.getDocument() not in [draw, _.doc, None]: __halt__ = True return __time__.sleep(0.1) __dispatcher__(".uno:Paste", (), draw) 
__dispatcher__(".uno:FormatGroup", (), draw) pic = drawpage.getByIndex(0) pic.setPosition(__Point__((g.BoundRect.Width - g.Size.Width)//2, (g.BoundRect.Height - g.Size.Height)//2)) drawpage.Height, drawpage.Width = g.BoundRect.Height, g.BoundRect.Width if not os.path.isabs(name): name = os.getcwd() + os.path.sep + name __dispatcher__(".uno:ExportTo", (__getprop__("URL", unohelper.systemPathToFileUrl(name)), __getprop__("FilterName", "draw_svg_Export")), draw) draw.close(True) while XSCRIPTCONTEXT.getDocument() != _.doc: if XSCRIPTCONTEXT.getDocument() not in [draw, _.doc, None]: __halt__ = True return __time__.sleep(0.1) create_valid_svg_file(name) __group__ = __groupstack__.pop() if __group__ and g: __group__.add(g) __removeshape__(__ACTUAL__) def __int__(x): # handle eg. int("10cm") if type(x) == str or type(x) == unicode: x = __float__(x) return int(x) def __float__(x): # handle eg. float("10,5cm") if type(x) == str or type(x) == unicode: for i in __comp__[_.lng]: x = re.sub(u"(?iu)" + i[0], i[1], x) x = eval(x) return float(x) def fontheight(n = -1): if n != -1: _.fontheight = n else: return _.fontheight def fontweight(n = -1): if n != -1: _.fontweight = n else: return _.fontweight def fontfamily(s = -1): if s != -1: _.fontfamily = s else: return _.fontfamily def fontstyle(n = -1): if n != -1: _.fontstyle = n else: return _.fontstyle def __loadlang__(lang, a): global comp, __colors__ __colors__[lang] = {} for i in __COLORS__: for j in a[i[0]].split("|"): __colors__[lang][j.lower()] = i[1] for i in a: if not i[0:3] in ["LIB", "ERR", "PT", "INC", "MM", "CM", "HOU", "DEG"] and not i in __STRCONST__: # uppercase native commands a[i] = a[i].upper() repcount = a['REPCOUNT'].split('|')[0] loopi = itertools.count() loop = lambda r: "%(i)s = 1\n%(orig)s%(j)s = %(i)s\n%(i)s += 1\n" % \ { "i": repcount + str(next(loopi)), "j": repcount, "orig": re.sub( r"(?ui)(?<!:)\b%s\b" % repcount, repcount + str(next(loopi)-1), r.group(0)) } __comp__[lang] = [ [r"(?i)(?<!:)(\b|(?=[-:]))(?:%s)\b" % "|".join([a[i].lower() for i in a if not "_" in i and i != "DECIMAL"]), lambda s: s.group().upper()], # uppercase all native commands in the source code [r"(?<!:)\b(?:%s) \[(?= |\n)" % a['GROUP'], "\n__groupstart__()\nfor __groupindex__ in range(2):\n[\nif __groupindex__ == 1:\n[\n__groupend__()\nbreak\n]\n"], [r"(?<!:)\b(?:%s) (%s[^[]*)\[(?= |\n)" % (a['GROUP'], __DECODE_STRING_REGEX__), "\n__groupstart__(\\1)\nfor __groupindex__ in range(2):\n[\nif __groupindex__ == 1:\n[\n__groupend__(\\1)\nbreak\n]\n"], [r"(?<!:)\b(?:%s)\b" % a['GROUP'], "\n__removeshape__(__ACTUAL__)\n"], [r"(\n| )][ \n]*\[(\n| )", "\n]\nelse:\n[\n"], # if/else block [r"(?<!\n)\[(?= |\n)", ":\n[\n"], # start block [r"( ]|\n]$)", "\n]\n"], # finish block [r"(?<!:)\b(?:%s)\b" % a['FOR'], "\nfor"], [r"(?<!:)\b(?:%s)\b" % a['REPEAT'], "\n__repeat__"], [r"(?<!:)\b(?:%s)\b" % a['BREAK'], "\nbreak"], [r"(?<!:)\b(?:%s)\b" % a['CONTINUE'], "\ncontinue"], [r"(?<!:)\b(?:%s)\b" % a['REPCOUNT'], repcount], [r"(?<!:)\b(?:%s)\b" % a['IF'], "\nif"], [r"(?<!:)\b(?:%s)\b" % a['WHILE'], "\nwhile"], [r"(?<!:)\b(?:%s)\b" % a['OUTPUT'], "\nreturn"], [r"\n(if|while|return) [^\n]*", lambda r: re.sub("(?<![=!<>])=(?!=)", "==", r.group(0))], # = -> ==, XXX x = y = 1? [r"(?<=\n)(for\b :?\w+) ([^\n]+)(?<=\w|]|}|\))(?=-|:)(?:%s)\b" % a['IN'], "\\1 in \\2"], # "for x y-in" -> "for x in y" [r"(:?\b\w+|[[][^[\n]*])\b(?:%s)\b" % a['IN'], "in \\1"], # "x y-in" -> "x in y" [r"(?<!:)\b(?:%s)\b" % a['IN'], "in"], [r"(?<!:)\b(?:%s)\b[ \t]+(:?\w+)\b(?! 
in\b)" % a['FOR'], "\nfor \\1 in"], [r"(?<=\n)__repeat__ :\n", "while True:\n"], # infinite loop [r"(?<=\n)(for|while) (?!__groupindex__)[^\n]*:\n\[\n", loop], # loop variables for repcount (not groupindex loop) [r"(?<=\n)__repeat__([^\n]*\w[^\n]*):(?=\n)", "for %s in range(1, 1+int(\\1)):" % repcount], # repeat block [r"(?<=\d)[%s](?=\d)" % a['DECIMAL'], "."], # decimal sign [r"(?<!/)/(?!/)", "*1.0/"], # fix division: /1 -> /1.0, but not with // [r"\b([0-9]+([,.][0-9]+)?)(%s)\b" % a['HOUR'], lambda r: str(float(r.group(1).replace(",", "."))*30)], # 12h = 12*30° [r"(?<=\d)(%s)" % a['DEG'], ""], # 1° -> 1 [r"(?<!:)\b(?:__def__)[ \t]+(\w+)\b[ \t]*([:]?\w[^\n]*)", "\ndef \\1(\\2):\n["], [r"(?<!:)\b(?:__def__)\s+(\w+)", "\ndef \\1():\n["], [r"(?<!:)\b(?:%s)\b" % a['END'], "\n]"], [r"(?<!:)\b(?:%s)\b" % a['GLOBAL'], "global"], [r"(?<!:)\b(?:%s)\b" % a['TRUE'], "True"], [r"(?<!:)\b(?:%s)\b" % a['FALSE'], "False"], [r"(?<!:)\b(?:%s)\b" % a['NOT'], "not"], [r"(?<!:)\b(?:%s)\b" % a['AND'], "and"], [r"(?<!:)\b(?:%s)\b" % a['OR'], "or"], [r"(?<!:)\b(?:%s)\b" % a['INT'], "__int__"], [r"(?<!:)\b(?:%s)\b" % a['FLOAT'], "__float__"], [r"(?<!:)\b(?:%s)\b" % a['STR'], "__string__"], [r"(?<!:)\b(?:%s)\b" % a['COUNT'], "len"], [r"(?<!:)\b(?:%s)\b" % a['ROUND'], "round"], [r"(?<!:)\b(?:%s)\b" % a['ABS'], "abs"], [r"(?<!:)\b(?:%s)\b" % a['SIN'], "sin"], [r"(?<!:)\b(?:%s)\b" % a['COS'], "cos"], [r"(?<!:)\b(?:%s)\b" % a['PI'], "pi"], [r"(?<!:)\b(?:%s)\b" % a['SQRT'], "sqrt"], [r"(?<!:)\b(?:%s)\b" % a['LOG10'], "log10"], [r"(?<!:)\b(?:%s)\b" % a['MIN'], "min"], [r"(?<!:)\b(?:%s)\b" % a['MAX'], "max"], [r"(?<!:)\b(?:%s)\b" % a['STOP'], "\nreturn None"], [r"(?<!:)\b(?:%s)\b" % a['CLEARSCREEN'], "\n__cs__()"], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENCOLOR'], "\n)pencolor("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENSTYLE'], "\n)penstyle("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENJOINT'], "\n)penjoint("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENCAP'], "\n)pencap("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FILLCOLOR'], "\n)fillcolor("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FILLTRANSPARENCY'], "\n)filltransparency("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENTRANSPARENCY'], "\n)pentransparency("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FILLSTYLE'], "\n)fillstyle("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FONTCOLOR'], "\n)fontcolor("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FONTFAMILY'], "\n)fontfamily("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FONTHEIGHT'], "\n)fontheight("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FONTWEIGHT'], "\n)fontweight("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['FONTSTYLE'], "\n)fontstyle("], [r"(?<!:)\b(?:%s)(\s+|$)" % a['PENWIDTH'], "\n)pensize("], [r"(?<!:)\b(?:%s)\b" % a['PENDOWN'], "\n__pen__(1)"], [r"(?<!:)\b(?:%s)\b" % a['PENUP'], "\n__pen__(0)"], [r"(?<!:)\b(?:%s)\b" % a['HIDETURTLE'], "\nhideturtle()"], [r"(?<!:)\b(?:%s)\b" % a['SHOWTURTLE'], "\nshowturtle()"], [r"(?<!:)\b(?:%s)\b\[" % a['POSITION'], "position()["], [r"(?<!:)\b(?:%s)\b(?!\()" % a['POSITION'], "\n)position("], [r"(?<!:)\b(?:%s)\b" % a['HEADING'], "\n)heading("], [r"(?<!:)\b(?:%s)\b" % a['PAGESIZE'], "pagesize()"], [r"(?<!:)\b(?:%s)\b" % a['POINT'], "\npoint()"], [r"(?<!:)\b(?:%s)\b" % (a['ELLIPSE'] + "|" + a['CIRCLE']), "\n)ellipse("], [r"(?<!:)\b(?:%s)\b" % (a['RECTANGLE'] + "|" + a['SQUARE']), "\n)rectangle("], [r"(?<!:)\b(?:%s)\b" % a['CLOSE'], "\n__fillit__(False)"], [r"(?<!:)\b(?:%s)\b" % a['FILL'], "\n__fillit__()"], [r"(?<!:)\b(?:%s)\b" % a['LABEL'], "\n)label("], [r"(?<!:)\b(?:%s)\b" % a['TEXT'], "\n)text(__getshape__(__ACTUAL__),"], [r"(text\([ \t]*\"[^\"\n\)]*)", "\\1\"\n"], [r"(?<!:)\b(?:%s)\b" % 
a['HOME'], "\nturtlehome()"], [r"(?<!:)\b(?:%s)\b" % a['SLEEP'], "\n)sleep("], [r"(?<!:)\b(?:%s)\b" % a['FORWARD'], "\n)forward("], [r"(?<!:)\b(?:%s)\b" % a['BACKWARD'], "\n)backward("], [r"(?<!:)\b(?:%s)\b" % a['TURNRIGHT'], "\n)turnright("], [r"(?<!:)\b(?:%s)\b" % a['RANDOM'], "Random"], [r"(?<!:)\b(?:%s)\b(?= \d)" % 'Random', "random.random()*"], [r"(?<!:)\b(?:%s)\b" % a['SET'], "set"], [r"(?<!:)\b(?:%s)\b" % a['RANGE'], "range"], [r"(?<!:)\b(?:%s)\b" % a['LIST'], "list"], [r"(?<!:)\b(?:%s)\b" % a['TUPLE'], "tuple"], [r"(?<!:)\b(?:%s)\b" % a['SORTED'], "sorted"], [r"(?<!:)\b(?:%s)\b ?\(" % a['RESEARCH'], "re.search('(?u)'+"], [r"(?<!:)\b(?:%s)\b ?\(" % a['RESUB'], "re.sub('(?u)'+"], [r"(?<!:)\b(?:%s)\b ?\(" % a['REFINDALL'], "re.findall('(?u)'+"], [r"(?<!:)\b(?:%s)\b" % a['ANY'], "u'any'"], [r"(?<!:)\b(?:%s) (\w+|[[][^\]]*])\b" % a['INPUT'], " Input(\\1)"], [r"(?<!:)\b(?:%s)\b" % a['PRINT'], "\n)Print("], [r"(?<!:)\b(?:%s)\b" % a['TURNLEFT'], "\n)turnleft("], [r"\b([0-9]+([,.][0-9]+)?)(%s)\b" % a['PT'], "\\1"], [r"\b([0-9]+([,.][0-9]+)?)(%s)(?!\w)" % a['INCH'], lambda r: str(float(r.group(1).replace(",", "."))*72)], [r"\b([0-9]+([,.][0-9]+)?)(%s)\b" % a['MM'], lambda r: str(float(r.group(1).replace(",", "."))*__MM_TO_PT__)], [r"\b([0-9]+([,.][0-9]+)?)(%s)\b" % a['CM'], lambda r: str(float(r.group(1).replace(",", "."))*__MM_TO_PT__*10)], [r"\b(__(?:int|float|string)__len|round|abs|sin|cos|sqrt|log10|set|list|tuple|sorted)\b ((?:\w|\d+([,.]\d+)?|0[xX][0-9a-fA-F]+|[-+*/]| )+)\)" , "\\1(\\2))" ], # fix parsing: (1 + sqrt x) -> (1 + sqrt(x)) [r"(?<=[-*/=+,]) ?\n\)(\w+)\(", "\\1()"], # read attributes, eg. x = fillcolor [r"(?<=return) ?\n\)(\w+)\(", "\\1()"], # return + user function [r"(?<=(?:Print|label)\() ?\n\)(\w+)\(", "\\1()\n"] # Print/label + user function ] def __concatenation__(r): # keep line positions with extra line breaks s = re.subn("~[ \t]*\n", " ", r.group(0)) return s[0] + "\n" * s[1] def __compil__(s): global _, comp, __strings__, __compiled__ try: c = _.doc.CurrentController.getViewCursor() locs = [i for i in [c.CharLocale, c.CharLocaleAsian, c.CharLocaleComplex] if i.Language != 'zxx'] # not None language loc = Locale(__uilocale__.split('-')[0], __uilocale__.split('-')[1], '') if locs and loc not in locs: loc = locs[0] try: _.lng = loc.Language + '_' + loc.Country __loadlang__(_.lng, __l12n__(_.lng)) except: __trace__() _.lng = loc.Language __loadlang__(_.lng, __l12n__(_.lng)) except: __trace__() _.lng = 'en_US' if not _.lng in __comp__: __loadlang__(_.lng, __l12n__(_.lng)) _.decimal = __l12n__(_.lng)['DECIMAL'] names = {} rmsp = re.compile(r"[ ]*([=+*/]|==|<=|>=|<>|!=|-[ ]+)[ ]*") chsp = re.compile(r"[ \t]+") chch = re.compile(r"(?u)(?<!\w):(?=\w)") parenfix = re.compile(r"(?ui)(\([^\(\[\]\)]+)]\)") # remove CR characters and split lines s = re.sub(r'[ \t\r]*(?=\n)', '', s) # remove full line comments s = re.sub(r"^[ \t]*[;#][^\n]*", "", s) s = re.sub(r"(?<=\n)[ \t]*[;#][^\n]*", "", s) # concatenate lines __compiled__ = re.sub(r'([^\n]*~[ \t]*\n)+[^\n]*', __concatenation__, s) # sign original line breaks s = re.sub("(?<=\n)", __LINEBREAK__ + "\n", __compiled__) # encode strings lq = '\'' + __l12n__(_.lng)['LEFTSTRING'].replace("|", "") rq = '\'' + __l12n__(_.lng)['RIGHTSTRING'].replace("|", "") __strings__ = [] s = re.sub("(?u)([%s])([^\n%s]*)(?<!\\\\)[%s]" % (lq, rq, rq), __encodestring__, s) s = re.sub('(?u)(?<![0-9])(")(~?\w*)', __encodestring__, s) # remove extra spaces s = chsp.sub(" ", s) # remove inline comments s = re.sub(r"[ ]*;[^\n]*", "", s) # n-dash and 
m-dash as minus signs s = re.sub(r"(?u)[–—]", "-", s) # replace procedure names s = re.sub(r"(?i)^[ ]*(%s)[ ]+" % __l12n__(_.lng)['TO'], "__def__ ", s) s = re.sub(r"(?i)\n[ ]*(%s)[ ]+" % __l12n__(_.lng)['TO'], "\n__def__ ", s) subnames = re.findall(u"(?iu)(?<=__def__ )\w+", s) globs = "" functions = ["range", "__int__", "__float__", "Random", "Input", "__string__", "len", "round", "abs", "sin", "cos", "sqrt", "log10", "set", "list", "tuple", "re.sub", "re.search", "re.findall", "sorted", "min", "max"] if len(subnames) > 0: globs = "global %s" % ", ".join(subnames) # search user functions (function calls with two or more arguments need explicite Python parentheses) ends = __l12n__(_.lng)["END"] # support multiple names of "END" firstend = ends.split("|")[0] s = re.sub(r"(?<!:)\b(?:%s)\b" % ends, firstend, s) __l12n__(_.lng)["END"] = firstend functions += [ re.findall("(?u)\w+",i[0])[0] for i in re.findall(r"""(?iu)(?<=__def__ )([^\n]*)\n # beginning of a procedure (?:[^\n]*(?<!\b(%(END)s))\n)* # 0 or more lines (not END) [^\n]*\b(?:%(OUTPUT)s)\b[^\n]*\n # line with OUTPUT (functions = procedures with OUTPUT) (?:[^\n]*(?<!\b(?:%(END)s))\n)* # 0 or more lines (not END) [ \t]*\b(?:%(END)s)\b""" % __l12n__(_.lng), s, re.X) ] __l12n__(_.lng)["END"] = ends # add line breaks before procedure calls procedures = set(subnames) - set(functions) if len(procedures) > 0: s = re.sub(r"(?<!__def__)(?<![-+=*/])(?<!%s)(?:^|[ \t]+)(" % ")(?<!".join(functions) + "|".join(procedures) + ")(?!\w)", r"\n\1", s) # compile native Logo for i in __comp__[_.lng]: s = re.sub(u"(?u)" + i[0], i[1], s) indent = 0 result = "" func = re.compile("(?iu)(def (\w+))(\(.*\):)") expr = r"""(?iu)(?<!def[ ])(?<![:\w])%(name)s(?!\w)(?!\()(?![ ]\() ( ([ ]+\[*([-+]|\([ ]?)*((%(functions)s)\b[ ]*\(*)* (?:0x[0-9a-f]+|[0-9]+([,.][0-9]+)?|:?\w+(?:[.]\w+[\(]?[\)]?)?]*|\[])]*[\)]* ( (?:[ ]*([+*/,<>]|//|==|<=|>=|<>|!=)[ ]*|[ ]*-[ ]+|-|[ ]*[*][*][ ]*) # operators, eg. 
"**", " - ", "-", "- " \[*([-+]|\([ ]?)* # minus sign, parenthesis ((%(functions)s)\b[ ]*\(*)*(0x[0-9a-f]+|[0-9]+([.,][0-9]+)?|:?\w+(?:[.]\w+[\(]?[\)]?)?)]* ([ ]?\))*)* [\)]*){,%(repeat)s} ) """ chargsp = re.compile(r"(?<![\(,])(?<!%s) (?!\)|,)" % ")(?<!".join(functions)) # compile to Python joinfunc = "|".join(functions) funcnames = {} for i in s.split("\n"): i = i.strip() if i[0:4] == 'def ': s = func.search(i) if s.group(3) == '():': names[s.group(2)] = (0, "") else: s2 = len(chsp.findall(s.group(3))) + 1 i = s.group(1) + chsp.sub(", ", s.group(3)) names[s.group(2)] = (s2, re.compile(expr % {"name": s.group(2), "functions": joinfunc, "repeat": s2}, re.X)) for j in functions: if j in i: if not j in funcnames: funcnames[j] = (1, re.compile(expr % {"name": j, "functions": joinfunc, "repeat": 1 + 2 * int(j == 'range')}, re.X)) r = funcnames[j][1].search(i) while r: i = i[:r.start()] + j + '(' + chargsp.sub(", ", rmsp.sub(lambda l: l.group(1).strip(), r.group(1).strip())) + ')' + i[r.end():] i = parenfix.sub("\\1)]", i) r = funcnames[j][1].search(i) for j in names: if j in i: if names[j][0] == 0: if not j in functions: i = re.sub(r"(?iu)(?<!def )(?<![_\w])\b%s\b(?!\w)" %j, j+'()', i) else: r = names[j][1].search(i) if r: i = i[:r.start()] + j + '(' + chargsp.sub(", ", rmsp.sub(lambda l: l.group(1).strip(), r.group(1).strip())) + ')' + i[r.end():] i = parenfix.sub("\\1)]", i) if i[0:1] == '[': i = i[1:] indent += 1 result = result + "\n" + " " * indent + "__checkhalt__()\n" if i[0:1] == ')': i = i[1:] + ')' result = result + "\n" + " " * indent + i if i[0:1] == ']': result = result[:-1] indent -= 1 # colon_to_underline in Logo variables result = chch.sub("_", result) # character encoding result = to_ascii(result).replace(r"\n", "\n") # decode strings result = re.sub(__DECODE_STRING_REGEX__, __decodestring__, result) return to_ascii(globs) + "\n" + result def __gotoline__(n): _.cursor.collapseToStart() for i in range(1, n): _.cursor.gotoNextParagraph(False) try: _.doc.CurrentController.getViewCursor().gotoRange(_.cursor, False) except: __dispatcher__(".uno:Escape") _.doc.CurrentController.getViewCursor().gotoRange(_.cursor.getStart(), False) g_exportedScripts = left, right, goforward, gobackward, run, stop, home, clearscreen, commandline, __translate__ # vim: set noet sw=4 ts=4:
Limezero/libreoffice
librelogo/source/LibreLogo/LibreLogo.py
Python
gpl-3.0
79003
#
# Copyright (C) 2015  FreeIPA Contributors see COPYING for license
#

from collections import namedtuple

from ipalib import _
from ipalib import Command
from ipalib import errors
from ipalib import output
from ipalib.parameters import Int
from ipalib.plugable import Registry
from ipapython.dn import DN

__doc__ = _("""
Raise the IPA Domain Level.
""")

register = Registry()

DomainLevelRange = namedtuple('DomainLevelRange', ['min', 'max'])

domainlevel_output = (
    output.Output('result', int, _('Current domain level:')),
)


def get_domainlevel_dn(api):
    domainlevel_dn = DN(
        ('cn', 'Domain Level'),
        ('cn', 'ipa'),
        ('cn', 'etc'),
        api.env.basedn
    )
    return domainlevel_dn


def get_domainlevel_range(master_entry):
    try:
        return DomainLevelRange(
            int(master_entry['ipaMinDomainLevel'][0]),
            int(master_entry['ipaMaxDomainLevel'][0])
        )
    except KeyError:
        return DomainLevelRange(0, 0)


def check_conflict_entries(ldap, api, desired_value):
    """
    Check if conflict entries exist in topology subtree
    """
    container_dn = DN(
        ('cn', 'ipa'),
        ('cn', 'etc'),
        api.env.basedn
    )
    conflict = "(nsds5replconflict=*)"
    subentry = "(|(objectclass=ldapsubentry)(objectclass=*))"
    try:
        ldap.get_entries(
            filter="(& %s %s)" % (conflict, subentry),
            base_dn=container_dn,
            scope=ldap.SCOPE_SUBTREE)
        message = _("Domain Level cannot be raised to {0}, "
                    "existing replication conflicts have to be resolved."
                    .format(desired_value))
        raise errors.InvalidDomainLevelError(reason=message)
    except errors.NotFound:
        pass


def get_master_entries(ldap, api):
    """
    Returns list of LDAPEntries representing IPA masters.
    """
    container_masters = DN(
        ('cn', 'masters'),
        ('cn', 'ipa'),
        ('cn', 'etc'),
        api.env.basedn
    )
    masters, _dummy = ldap.find_entries(
        filter="(cn=*)",
        base_dn=container_masters,
        scope=ldap.SCOPE_ONELEVEL,
        paged_search=True,  # we need to make sure to get all of them
    )
    return masters


@register()
class domainlevel_get(Command):
    __doc__ = _('Query current Domain Level.')

    has_output = domainlevel_output

    def execute(self, *args, **options):
        ldap = self.api.Backend.ldap2
        entry = ldap.get_entry(
            get_domainlevel_dn(self.api),
            ['ipaDomainLevel']
        )
        return {'result': int(entry.single_value['ipaDomainLevel'])}


@register()
class domainlevel_set(Command):
    __doc__ = _('Change current Domain Level.')

    has_output = domainlevel_output

    takes_args = (
        Int('ipadomainlevel',
            cli_name='level',
            label=_('Domain Level'),
            minvalue=0,
        ),
    )

    def execute(self, *args, **options):
        """
        Checks all the IPA masters for supported domain level ranges.
        If the desired domain level is within the supported range of all
        masters, it will be raised.

        Domain level cannot be lowered.
        """
        ldap = self.api.Backend.ldap2

        current_entry = ldap.get_entry(get_domainlevel_dn(self.api))
        current_value = int(current_entry.single_value['ipadomainlevel'])
        desired_value = int(args[0])

        # Domain level cannot be lowered
        if desired_value < current_value:
            message = _("Domain Level cannot be lowered.")
            raise errors.InvalidDomainLevelError(reason=message)

        # Check if every master supports the desired level
        for master in get_master_entries(ldap, self.api):
            supported = get_domainlevel_range(master)
            if supported.min > desired_value or supported.max < desired_value:
                message = _("Domain Level cannot be raised to {0}, server {1} "
                            "does not support it."
                            .format(desired_value, master['cn'][0]))
                raise errors.InvalidDomainLevelError(reason=message)

        # Check if conflict entries exist in topology subtree;
        # they should be resolved first
        check_conflict_entries(ldap, self.api, desired_value)

        current_entry.single_value['ipaDomainLevel'] = desired_value
        ldap.update_entry(current_entry)

        return {'result': int(current_entry.single_value['ipaDomainLevel'])}
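# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original plugin. domainlevel_set
# accepts a new level only when it lies inside every master's advertised
# [min, max] range; a single server outside the range vetoes the raise. The
# standalone check below mirrors that loop on plain namedtuples (the helper
# name and sample ranges are made up), so the rule can be exercised without
# an LDAP backend.

from collections import namedtuple

_RangeSketch = namedtuple('_RangeSketch', ['min', 'max'])

def _supported_everywhere(desired, ranges):
    """True if every master's range contains the desired level."""
    return all(r.min <= desired <= r.max for r in ranges)

# Example: a legacy master capped at level 0 blocks raising to 1.
assert _supported_everywhere(1, [_RangeSketch(0, 1), _RangeSketch(1, 2)])
assert not _supported_everywhere(1, [_RangeSketch(0, 1), _RangeSketch(0, 0)])
# ---------------------------------------------------------------------------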
apophys/freeipa
ipaserver/plugins/domainlevel.py
Python
gpl-3.0
4514
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2012 Nik Lutz <[email protected]>
# Copyright (C) 2009 Harry Karvonen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .lib_pulseaudio import *
import math
from VeromixUtils import *

# This contains all basic volume features
class PulseVolume:

    # NOTE: upstream defined __init__ twice; the first definition (taking a
    # mono volume plus a channel count) was dead code because the second
    # silently replaced it. Both call styles are merged here: pass a list of
    # per-channel values, or a single volume together with a channel count.
    def __init__(self, values, channels=None):
        if channels is not None:
            vol = values
            if vol > 100 or vol < 0:
                print("WARNING: Volume is invalid!")
                vol = 0
            self.channels = channels
            self.values = [vol] * self.channels
        else:
            self.channels = len(values)
            self.values = list(values)

    ##############################
    #
    # Type conversions
    #

    #def fromCtypes(self, pa_cvolume):
    #    self.channels = pa_cvolume.channels
    #    self.values = map(lambda x: (math.ceil(float(x) * 100 / PA_VOLUME_NORM)),
    #                      pa_cvolume.values[0:self.channels])
    #    return self

    def toCtypes(self):
        ct = struct_pa_cvolume()
        ct.channels = self.channels
        for x in range(0, self.channels):
            ct.values[x] = int((self.values[x] * PA_VOLUME_NORM) / 100)
        return ct

    def toCtypes2(self, num):
        ct = struct_pa_cvolume()
        ct.channels = num
        for x in range(0, num):
            # int() is needed here as well: under true division the original
            # expression handed ctypes a float
            ct.values[x] = int((self.values[x] * PA_VOLUME_NORM) / 100)
        return ct

    ###

    def printDebug(self):
        print("PulseVolume")
        print("self.channels:", self.channels)
        print("self.values:", self.values)
        #print "self.proplist:", self.proplist

    ###

    def incVolume(self, vol):
        "Increment volume level (mono only)"
        vol += sum(self.values) / len(self.values)
        vol = int(vol)
        if vol > 100:
            vol = 100
        elif vol < 0:
            vol = 0
        self.setVolume(vol)

    ###

    def setVolume(self, vol, balance=None):
        if not balance:
            self.values = [vol] * self.channels
        else:
            self.values[balance] = vol

    ###

    def getVolume(self):
        "Return mono volume"
        return int(sum(self.values) / len(self.values))

    ###

    def __str__(self):
        return "Channels: " + str(self.channels) + \
               ", values: \"" + str([str(x) + "%" for x in self.values]) + "\""

################################################################################

class PulseVolumeCtypes(PulseVolume):

    def __init__(self, pa_cvolume, pa_channel_map):
        self.channels = pa_cvolume.channels
        self.channel_map = pa_channel_map
        self.values = [(math.ceil(float(x) * 100 / PA_VOLUME_NORM))
                       for x in pa_cvolume.values[0:self.channels]]

    def getVolumes(self):
        vol = {}
        for i in range(0, self.channels):
            key = pa_channel_position_to_pretty_string(self.channel_map.map[i])
            entry = {}
            entry[in_unicode(key)] = self.values[i]
            vol[i] = entry
        return vol
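# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module. toCtypes() above maps
# the class's 0..100 percent values onto PulseAudio's raw volume scale, where
# PA_VOLUME_NORM means "100%". The helpers below reproduce that scaling with
# plain ints so the round trip can be checked without ctypes or a running
# daemon. The constant value is an assumption taken from PulseAudio's
# documentation (PA_VOLUME_NORM == 0x10000); the helper names are sketch-only.

import math

PA_VOLUME_NORM_SKETCH = 0x10000  # assumed value of PulseAudio's PA_VOLUME_NORM

def _percent_to_raw(percent):
    # Mirrors toCtypes(): truncate toward zero after scaling.
    return int((percent * PA_VOLUME_NORM_SKETCH) / 100)

def _raw_to_percent(raw):
    # Mirrors PulseVolumeCtypes.__init__(): round up to the next percent.
    return math.ceil(float(raw) * 100 / PA_VOLUME_NORM_SKETCH)

# Example round trip: 50% -> 32768 raw -> 50%.
assert _percent_to_raw(50) == 32768
assert _raw_to_percent(_percent_to_raw(50)) == 50
# ---------------------------------------------------------------------------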
Alwnikrotikz/veromix-plasmoid
dbus-service/pulseaudio/PulseVolume.py
Python
gpl-3.0
3,659
# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import its.image
import its.caps
import its.device
import its.objects
import its.target
import pylab
import os.path
import matplotlib
import matplotlib.pyplot

def main():
    """Test that the android.sensor.sensitivity parameter is applied.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    NUM_STEPS = 5

    sensitivities = None
    r_means = []
    g_means = []
    b_means = []

    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                             its.caps.per_frame_control(props))

        expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
        sens_range = props['android.sensor.info.sensitivityRange']
        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
        sensitivities = [sens_range[0] + i * sens_step for i in range(NUM_STEPS)]

        for s in sensitivities:
            req = its.objects.manual_capture_request(s, expt)
            cap = cam.do_capture(req)
            img = its.image.convert_capture_to_rgb_image(cap)
            its.image.write_image(
                    img, "%s_iso=%04d.jpg" % (NAME, s))
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            rgb_means = its.image.compute_image_means(tile)
            r_means.append(rgb_means[0])
            g_means.append(rgb_means[1])
            b_means.append(rgb_means[2])

    # Draw a plot.
    pylab.plot(sensitivities, r_means, 'r')
    pylab.plot(sensitivities, g_means, 'g')
    pylab.plot(sensitivities, b_means, 'b')
    pylab.ylim([0,1])
    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))

    # Test for pass/fail: check that each shot is brighter than the previous.
    for means in [r_means, g_means, b_means]:
        for i in range(len(means)-1):
            assert(means[i+1] > means[i])

if __name__ == '__main__':
    main()
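A worked example of the sensitivity sweep computed in the test, using a hypothetical sensor range of [100, 800] in place of the device-reported android.sensor.info.sensitivityRange:

NUM_STEPS = 5
sens_range = [100, 800]  # invented stand-in for the device-reported range
sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS - 1)
sensitivities = [sens_range[0] + i * sens_step for i in range(NUM_STEPS)]
assert sensitivities == [100.0, 275.0, 450.0, 625.0, 800.0]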
s20121035/rk3288_android5.1_repo
cts/apps/CameraITS/tests/scene1/test_param_sensitivity.py
Python
gpl-3.0
2,539
""" This is the base model for Gasista Felice. It includes common data on which all (or almost all) other applications rely on. """ from django.db import models from django.utils.translation import ugettext, ugettext_lazy as _ from django.contrib.auth.models import User from django.core.exceptions import ImproperlyConfigured, ValidationError from django.db.models import permalink from django_comments.models import Comment from django.contrib.contenttypes.models import ContentType from django.conf import settings from django.dispatch import receiver from django.db.models.signals import post_save, pre_save from workflows.models import Workflow, Transition, State #from history.models import HistoricalRecords from consts import GAS_REFERRER_SUPPLIER from flexi_auth.models import PermissionBase # mix-in class for permissions management from flexi_auth.models import ParamRole, Param from flexi_auth.exceptions import WrongPermissionCheck from flexi_auth.utils import get_parametric_roles from flexi_auth.models import PrincipalParamRoleRelation from simple_accounting.models import economic_subject, AccountingDescriptor, LedgerEntry, account_type from lib import ClassProperty, unordered_uniq from gf.base import const from gf.base.utils import get_resource_icon_path from gf.base.accounting import PersonAccountingProxy from workflows.utils import do_transition import os import logging import geocoder log = logging.getLogger(__name__) class Resource(object): """Base class for project fundamental objects. This is a basic mix-in class used to factor out data/behaviours common to the majority of model classes in the project's applications. Resource API is composed of: * Basic methods and properties: * basic type and resource string representation * caching operations * Relational properties: * how the resource relates to other resources """ # Attribute used to make a list of confidential lists confidential_fields = () # Attribute used to cache data volatile_fields = [] #----------------------------------------- # Basic properites #----------------------------------------- @ClassProperty @classmethod def resource_type(cls): """String representation of resource type""" return cls.__name__.lower() @property def urn(self): """Unique resource name""" return '%s/%s' % (self.resource_type, self.pk) @property def ancestors(self): """List of ancestors of a resource. This is te list of parents from root to the resource itself. It is used p.e. to display navigation breadcrumbs. You SHOULD NOT implement it in subclasses """ if self.parent: return self.parent.ancestors + [self.parent] else: return [] @property def parent(self): """Identifies resource which includes this resource. Stated that there can be only one parent for a resource, (no multiple parents allowed), setting this attribute makes the resource confident of who includes itself. This attribute is then used to make the list of `:ref:ancestors`. You MUST implement it in subclasses if they have parent. 
""" return None def do_transition(self, transition, user): return do_transition(self, transition, user) @property def allnotes(self): ctype = ContentType.objects.get_for_model(self.__class__) notes = Comment.objects.filter(object_pk=self.pk, content_type=ctype).order_by('-submit_date') return notes @permalink def get_absolute_url(self): return ('rest.views.resource_page', (), { 'resource_type' : self.resource_type, 'resource_id' : self.pk }) def get_absolute_url_page(self): return self.get_absolute_url().replace('/rest', '/rest/#rest') def as_dict(self): return { 'name': unicode(self), 'urn' : self.urn, } #-- Referrers API --# @property def referrers(self): """Returns User QuerySet bound to resource""" raise NotImplementedError("class: %s method: referrers" % self.__class__.__name__) @property def referrer(self): """Return User bound to resource""" raise NotImplementedError("class: %s method: referrer" % self.__class__.__name__) @property def referrers_people(self): """Returns Person related to referrers QuerySet""" return Person.objects.filter(user__in=self.referrers) @property def info_people(self): """Returns Person to contact for info QuerySet""" raise NotImplementedError("class: %s method: info_people" % self.__class__.__name__) #-- History API --# # Requires that an history manager exists for the resource # TODO: encapsulate it in HistoryResource class @property def created_on(self): """Returns datetime instance of when the instance has been created.""" # There could be the case that a deleted id is reused, so, do not use .get method self_as_of_creation = \ self._default_history.filter(id=self.pk, history_type="+")[0] return self_as_of_creation.history_date @property def created_by(self): """Returns user that created the resource.""" #COMMENT fero: disabled user in history! return User.objects.none() # There could be the case that a deleted id is reused, so, do not use .get method self_as_of_creation = \ self._default_history.filter(id=self.pk, history_type="+")[0] return self_as_of_creation.history_user @property def created_by_person(self): """Returns person bound to the user that created the resource.""" u = self.created_by if u is not None: return u.person return None @property def last_update_by(self): """Returns user that has made the last update to the resource.""" #COMMENT fero: disabled user in history! 
return User.objects.none() # There could be the case that a deleted id is reused, so, do not use .get method try: self_as_of_last_update = \ self._default_history.filter(id=self.pk, history_type="~")[0] except IndexError: # This object has never been update return None else: return self_as_of_last_update.history_user @property def last_update_by_person(self): """Returns person bound to the user that made the last update the resource.""" u = self.last_update_by if u is not None: return u.person return None @property def updaters(self): """Returns User QuerySet of who has updated the resource.""" self_updaters = unordered_uniq( self._default_history.filter(id=self.pk, history_type="~").values_list('history_user') ) return User.objects.filter(pk__in=map(lambda x: x[0].pk, self_updaters)) #------------------------------------ # Basic properties: cache management #------------------------------------ def save_checkdata_in_cache(self): key = Resource.cache_key(self.pk) data_to_cache = {} for n in self.volatile_fields: data_to_cache[n] = getattr(self, n) if not data_to_cache: return False try: pstore.savedata(key, data_to_cache) except Exception, e: raise return True def load_checkdata_from_cache(self): if not self.volatile_fields: return False key = Resource.cache_key(self.pk) data = pstore.getalldata(key, self.volatile_fields) for n in self.volatile_fields: if data.has_key(n): setattr(self, n, data[n]) return True @classmethod def cache_key(cls, resource_id): #TODO fero CHECK #Pay attention because it is connected to class return "%s/%s" % (cls.resource_type, resource_id) #--------------------------------------------- # Relational properties: # not all must be implemented by Resource subclasses # but just only that makes sense #--------------------------------------------- @property def des_list(self): """Return DES instances bound to the resource""" raise NotImplementedError("class: %s method: des_list" % self.__class__.__name__) @property def des(self): """Return the DES instance bound to the resource""" from des.models import Siteattr return Siteattr.get_site() raise NotImplementedError("class: %s method: des" % self.__class__.__name__) @property def gas_list(self): """Return GAS list bound to resource""" raise NotImplementedError("class: %s method: gas_list" % self.__class__.__name__) @property def gas(self): """Return GAS bound to resource""" raise NotImplementedError("class: %s method: gas" % self.__class__.__name__) def categories(self): """Return ProductCategory bound to resource""" raise NotImplementedError("class: %s method: categories" % self.__class__.__name__) def category(self): """Return ProductCategory bound to resource""" raise NotImplementedError("class: %s method: category" % self.__class__.__name__) @property def persons(self): """Return persons bound to resource""" raise NotImplementedError("class: %s method: persons" % self.__class__.__name__) @property def person(self): """Return person bound to resource""" raise NotImplementedError("class: %s method: person" % self.__class__.__name__) @property def gasmembers(self): """Return GAS members bound to resource""" raise NotImplementedError("class: %s method: gasmembers" % self.__class__.__name__) @property def gasmember(self): """Return GAS member bound to resource""" raise NotImplementedError("class: %s method: gasmember" % self.__class__.__name__) @property def pacts(self): """Return pacts bound to resource""" raise NotImplementedError("class: %s method: pacts" % self.__class__.__name__) @property def pact(self): """Return 
pact bound to resource""" raise NotImplementedError("class: %s method: pact" % self.__class__.__name__) @property def suppliers(self): """Return suppliers bound to resource""" raise NotImplementedError("class: %s method: suppliers" % self.__class__.__name__) @property def supplier(self): """Return supplier bound to resource""" raise NotImplementedError("class: %s method: supplier" % self.__class__.__name__) @property def orders(self): """Return orders bound to resource""" raise NotImplementedError("class: %s method: orders" % self.__class__.__name__) @property def order(self): """Return order bound to resource""" raise NotImplementedError("class: %s method: order" % self.__class__.__name__) @property def deliveries(self): """Return deliveries bound to resource""" raise NotImplementedError("class: %s method: deliveries" % self.__class__.__name__) @property def delivery(self): """Return delivery bound to resource""" raise NotImplementedError("class: %s method: delivery" % self.__class__.__name__) @property def withdrawals(self): """Return withdrawals bound to resource""" raise NotImplementedError("class: %s method: withdrawals" % self.__class__.__name__) @property def withdrawal(self): """Return withdrawal bound to resource""" raise NotImplementedError("class: %s method: withdrawal" % self.__class__.__name__) @property def products(self): """Return products bound to resource""" raise NotImplementedError("class: %s method: products" % self.__class__.__name__) @property def product(self): """Return product bound to resource""" raise NotImplementedError("class: %s method: product" % self.__class__.__name__) @property def stocks(self): """Return SupplierStock list bound to resource""" raise NotImplementedError("class: %s method: stocks" % self.__class__.__name__) @property def stock(self): """Return SupplierStock bound to resource""" raise NotImplementedError("class: %s method: stock" % self.__class__.__name__) @property def orderable_products(self): """Return GASSupplierOrderProduct querySet for orders bound to resource""" raise NotImplementedError("class: %s method: orderable_products" % self.__class__.__name__) @property def ordered_products(self): """Return GASMemberOrder querySet for orders bound to resource""" raise NotImplementedError("class: %s method: ordered_products" % self.__class__.__name__) @property def basket(self): """Return GASMemberOrder querySet for open orders bound to resource""" raise NotImplementedError("class: %s method: basket" % self.__class__.__name__) #-- Contacts --# @property def contacts(self): """Contact QuerySet bound to the resource. You SHOULD override it when needed """ return self.contact_set.all() @property def email_address(self): return ", ".join(unordered_uniq(map(lambda x: x[0], self.contacts.filter(flavour=const.EMAIL).values_list('value')))) @property def phone_address(self): return ", ".join(unordered_uniq(map(lambda x: x[0], self.contacts.filter(flavour=const.PHONE).values_list('value')))) @property def preferred_email_address(self): """The email address, where we should write if we would know more info on the resource. It is not necessarily bound to a person. 
NOTE that it could be even a list of addresses following syntax in RFC 5322 and RFC 5321, or simply http://en.wikipedia.org/wiki/Email_address#Syntax :) Usually you SHOULD NOT NEED TO OVERRIDE IT in subclasses """ if settings.EMAIL_DEBUG: return settings.EMAIL_DEBUG_ADDR else: return ", ".join(unordered_uniq(map(lambda x: x[0], self.preferred_email_contacts.values_list('value')))) @property def preferred_email_contacts(self): """Email Contacts, where we should write if we would know more info on the resource. It is not necessarily bound to a person. Usually you SHOULD NOT NEED TO OVERRIDE IT in subclasses """ return self.contacts.filter(flavour=const.EMAIL, is_preferred=True) or \ self.contacts.filter(flavour=const.EMAIL) @property def preferred_phone_address(self): return ", ".join(unordered_uniq(map(lambda x: x[0], self.preferred_phone_contacts.values_list('value')))) @property def preferred_phone_contacts(self): return self.contacts.filter(flavour=const.PHONE, is_preferred=True) or \ self.contacts.filter(flavour=const.PHONE) # @property # def preferred_www_address(self): # return ", ".join(unordered_uniq(map(lambda x: x[0], self.preferred_www_contacts.values_list('value')))) # @property # def preferred_www_contacts(self): # return self.contacts.filter(flavour=const.WWW, is_preferred=True) or \ # self.contacts.filter(flavour=const.WWW) @property def preferred_fax_address(self): return ", ".join(unordered_uniq(map(lambda x: x[0], self.preferred_fax_contacts.values_list('value')))) @property def preferred_fax_contacts(self): return self.contacts.filter(flavour=const.FAX, is_preferred=True) or \ self.contacts.filter(flavour=const.FAX) @property def icon(self): "Returns default icon for resource""" icon = models.ImageField(upload_to="fake") basedir = os.path.join(settings.STATIC_URL, "nui", "img", settings.THEME) icon.url = os.path.join(basedir, "%s%s.%s" % (self.resource_type, "128x128", "png")) return icon #TODO CHECK if these methods SHOULD be removed from Resource API # because they are tied only to a specific resource. Leave commented now. # If you need them in a specific resource, implement in it # @property # def gasstocks(self): # """Return GASSupplierStock list bound to resource""" # raise NotImplementedError # # @property # def gasstock(self): # """Return GASSupplierStock bound to resource""" # raise NotImplementedError # # @property # def units(self): # """Return unit measure list bound to resource""" # raise NotImplementedError # # @property # def unit(self): # """Return unit measure bound to resource""" # raise NotImplementedError #--------------------------# @property def economic_movements(self): """Return accounting LedgerEntry instances.""" raise NotImplementedError @property def balance(self): """Return an economic state bound to resource (DES, GASMember, GAS or Supplier through ) Accounting sold for this ressource """ acc_tot = self.person.accounting.system['/wallet'].balance return acc_tot #------------------------------------------------------------------------------ class PermissionResource(Resource, PermissionBase): """ Just a convenience for classes inheriting both from `Resource` and `PermissionBase` """ def _get_roles(self): """ Return a QuerySet containing all the parametric roles which have been assigned to this Resource. 
""" # Roles MUST BE a property because roles are bound to a User # with `add_principal()` and not directly to a GAS member # costruct the result set by joining partial QuerySets roles = [] ctype = ContentType.objects.get_for_model(self) params = Param.objects.filter(content_type=ctype, object_id=self.pk) # get all parametric roles assigned to the Resource; return ParamRole.objects.filter(param_set__in=params) roles = property(_get_roles) @economic_subject class Person(models.Model, PermissionResource): """ A Person is an anagraphic record of a human being. It can be a User or not. """ name = models.CharField(max_length=128,verbose_name=_('name')) surname = models.CharField(max_length=128,verbose_name=_('surname')) display_name = models.CharField(max_length=128, blank=True, verbose_name=_('display name')) # Leave here ssn, but do not display it ssn = models.CharField(max_length=128, unique=True, editable=False, blank=True, null=True, help_text=_('Write your social security number here'),verbose_name=_('Social Security Number')) contact_set = models.ManyToManyField('Contact', null=True, blank=True,verbose_name=_('contacts')) user = models.OneToOneField(User, null=True, blank=True, verbose_name=_('User'), help_text=_("bind to a user if you want to give this person an access to the platform") ) address = models.ForeignKey('Place', null=True, blank=True,verbose_name=_('main address')) avatar = models.ImageField(upload_to=get_resource_icon_path, null=True, blank=True, verbose_name=_('avatar')) website = models.URLField(blank=True, verbose_name=_("web site")) accounting = AccountingDescriptor(PersonAccountingProxy) # #history = HistoricalRecords() class Meta: verbose_name = _("person") verbose_name_plural = _("people") ordering = ('display_name',) db_table = 'base_person' def __unicode__(self): rv = self.display_name if not rv: # If display name is not provided --> save display name rv = u'%(name)s %(surname)s' % {'name' : self.name, 'surname': self.surname} self.display_name = rv self.save() # Removed city visualization following Orlando's and Dominique's agreements # WAS: if self.city: # WAS: rv += u" (%s)" % self.city return rv @property def report_name(self): return u"%(name)s %(surname)s" % {'name' : self.name, 'surname': self.surname} def clean(self): if not self.user and self.gasmembers.count(): raise ValidationError(_("A person without user cannot be a GAS member")) self.name = self.name.strip().lower().capitalize() self.surname = self.surname.strip().lower().capitalize() self.display_name = self.display_name.strip() if not self.ssn: self.ssn = None else: self.ssn = self.ssn.strip().upper() return super(Person, self).clean() @property def uid(self): """ A unique ID (an ASCII string) for ``Person`` model instances. """ return self.urn.replace('/','-') @property def parent(self): return self.des @property def icon(self): return self.avatar or super(Person, self).icon ## START Resource API # Note that all the following methods return a QuerySet @property def persons(self): return Person.objects.filter(pk=self.pk) @property def person(self): return self @property def gasmembers(self): #TODO UNITTEST """ GAS members associated to this person; to each of them corresponds a membership of this person in a GAS. """ return self.gasmember_set.all() @property def gas_list(self): #TODO UNITTEST """ All GAS this person belongs to (remember that a person may be a member of more than one GAS). 
""" from gf.gas.models import GAS gas_pks = set(member.gas.pk for member in self.gasmembers) return GAS.objects.filter(pk__in=gas_pks) @property def des_list(self): #TODO UNITTEST """ All DESs this person belongs to (either as a member of one or more GAS or as a referrer for one or more suppliers in the DES). """ from des.models import DES des_set = set([gas.des for gas in self.gas_list]) return DES.objects.filter(pk__in=[obj.pk for obj in des_set]) @property def des(self): from des.models import Siteattr return Siteattr.get_site() @property def pacts(self): """ A person is related to: pacts signed with a GAS he/she belongs to """ from gf.gas.models import GASSupplierSolidalPact # initialize the return QuerySet qs = GASSupplierSolidalPact.objects.none() #add the suppliers who have signed a pact with a GAS this person belongs to for gas in self.gas_list: qs = qs | gas.pacts return qs @property def suppliers(self): #TODO UNITTEST """ A person is related to: 1) suppliers for which he/she is a referrer 2) suppliers who have signed a pact with a GAS he/she belongs to """ from gf.supplier.models import Supplier # initialize the return QuerySet qs = Supplier.objects.none() #add the suppliers who have signed a pact with a GAS this person belongs to for gas in self.gas_list: qs = qs | gas.suppliers # add the suppliers for which this person is an agent referred_set = set([sr.supplier for sr in self.supplieragent_set.all()]) qs = qs | Supplier.objects.filter(pk__in=[obj.pk for obj in referred_set]) return qs @property def orders(self): #TODO UNITTEST """ A person is related to: 1) supplier orders opened by a GAS he/she belongs to 2) supplier orders for which he/she is a referrer 3) order to suppliers for which he/she is a referrer """ from gf.gas.models import GASSupplierOrder # initialize the return QuerySet qs = GASSupplierOrder.objects.none() #add the supplier orders opened by a GAS he/she belongs to for gas in self.gas_list: qs = qs | gas.orders return qs @property def deliveries(self): #TODO UNITTEST """ A person is related to: 1) delivery appointments for which this person is a referrer 2) delivery appointments associated with a GAS he/she belongs to """ from gf.gas.models import Delivery # initialize the return QuerySet qs = Delivery.objects.none() # add delivery appointments for which this person is a referrer for member in self.gasmembers: qs = qs | member.delivery_set.all() # add delivery appointments associated with a GAS he/she belongs to for gas in self.gas_list: qs = qs | gas.deliveries return qs @property def withdrawals(self): #TODO UNITTEST """ A person is related to: 1) withdrawal appointments for which this person is a referrer 2) withdrawal appointments associated with a GAS he/she belongs to """ from gf.gas.models import Withdrawal # initialize the return QuerySet qs = Withdrawal.objects.none() # add withdrawal appointments for which this person is a referrer for member in self.gasmembers: qs = qs | member.withdrawal_set.all() # add withdrawal appointments associated with a GAS he/she belongs to for gas in self.gas_list: qs = qs | gas.withdrawals return qs ## END Resource API @property def city(self): if self.address: return self.address.city else: return None def setup_accounting(self): """ Accounting hierarchy for Person. . 
ROOT (/) |----------- wallet [A] +----------- incomes [P,I] + | +--- other (private order, correction, deposit) +----------- expenses [P,E] + +--- other (correction, donation, ) """ self.subject.init_accounting_system() # create a generic asset-type account (a sort of "virtual wallet") system = self.accounting.system system.get_or_create_account( parent_path='/', name='wallet', kind=account_type.asset ) # Expenses and incomes of other kind... system.get_or_create_account( parent_path='/expenses', name='other', kind=account_type.expense ) system.get_or_create_account( parent_path='/incomes', name='other', kind=account_type.income ) #----------------- Authorization API ------------------------# # Table-level CREATE permission @classmethod def can_create(cls, user, context): # Who can create a new Person in a DES ? # * DES administrators allowed_users = User.objects.none() try: des = context['site'] except KeyError: return User.objects.none() #raise WrongPermissionCheck('CREATE', cls, context) else: allowed_users = des.gas_tech_referrers return user in allowed_users # Row-level EDIT permission def can_edit(self, user, context): # Who can edit a Person in a DES ? # * the person itself # * administrators of one of the DESs this person belongs to des_admins = [] for des in self.des_list: des_admins += des.admins allowed_users = list(des_admins) + [self.user] return user in allowed_users # Row-level DELETE permission def can_delete(self, user, context): # Who can delete a Person from the system ? allowed_users = [self.user] return user in allowed_users #-----------------------------------------------------# @property def username(self): if self.user: return self.user.username else: return ugettext("has not an account in the system") display_fields = ( name, surname, models.CharField(name="city", verbose_name=_("City")), models.CharField(name="username", verbose_name=_("Username")), #DO NOT SHOW now models.CharField(name="email_address", verbose_name=_("Email")), #DO NOT SHOW now models.CharField(name="phone_address", verbose_name=_("Phone")), address, ) def has_been_member(self, gas): """ Return ``True`` if this person is bound to the GAS ``gas`` (GASMember exist whether it is suspended or not), ``False`` otherwise. If ``gas`` is not a ``GAS`` model instance, raise ``TypeError``. """ from gf.gas.models import GAS, GASMember if not isinstance(gas, GAS): raise TypeError(_(u"GAS membership can only be tested against a GAS model instance")) return bool(GASMember.all_objects.filter(gas=gas, person=self).count()) def is_member(self, gas): """ Return ``True`` if this person is an active (not suspended) member of GAS ``gas``, ``False`` otherwise. If ``gas`` is not a ``GAS`` model instance, raise ``TypeError``. 
""" from gf.gas.models import GAS if not isinstance(gas, GAS): raise TypeError(_(u"GAS membership can only be tested against a GAS model instance")) return gas in [member.gas for member in self.gasmembers] @property def full_name(self): return self.name + self.surname def save(self, *args, **kw): if not self.display_name: self.display_name = u"%(name)s %(surname)s" % {'name' : self.name, 'surname': self.surname} super(Person, self).save(*args, **kw) class Contact(models.Model): """If is a contact, just a contact email or phone""" flavour = models.CharField(max_length=32, choices=const.CONTACT_CHOICES, default=const.EMAIL,verbose_name=_('flavour')) value = models.CharField(max_length=256,verbose_name=_('value')) is_preferred = models.BooleanField(default=False,verbose_name=_('preferred')) description = models.CharField(max_length=128, blank=True, default='',verbose_name=_('description')) ##history = HistoricalRecords() class Meta: verbose_name = _("contact") verbose_name_plural = _("contacts") db_table = 'base_contact' def __unicode__(self): return u"%(t)s: %(v)s" % {'t': self.flavour, 'v': self.value} def clean(self): self.flavour = self.flavour.strip() if self.flavour not in map(lambda x: x[0], const.CONTACT_CHOICES): raise ValidationError(_("Contact flavour MUST be one of %s" % map(lambda x: x[0], const.CONTACT_CHOICES))) self.value = self.value.strip() self.description = self.description.strip() return super(Contact, self).clean() class Place(models.Model, PermissionResource): """Places should be managed as separate entities for various reasons: * among the entities arising in the description of GAS' activities, there are several being places or involving places, so abstracting this information away seems a good thing; * in the context of multi-GAS (retina) orders, multiple delivery and/or withdrawal locations can be present. """ name = models.CharField(max_length=128, blank=True, help_text=_("You can avoid to specify a name if you specify an address"),verbose_name=_('name')) description = models.TextField(blank=True,verbose_name=_('description')) # QUESTION: add place type from CHOICE (HOME, WORK, HEADQUARTER, WITHDRAWAL...) # ANSWER: no place type here. It is just a point in the map address = models.CharField(max_length=128, blank=True,verbose_name=_('address')) #zipcode as a string: see http://stackoverflow.com/questions/747802/integer-vs-string-in-database zipcode = models.CharField(verbose_name=_("Zip code"), max_length=128, blank=True) city = models.CharField(max_length=128,verbose_name=_('city')) province = models.CharField(max_length=2, help_text=_("Insert the province code here (max 2 char)"),verbose_name=_('province')) #Geolocation: do not use GeoDjango PointField here. 
#We can make a separate geo application maybe in future lon = models.FloatField(null=True, blank=True,verbose_name=_('lon')) lat = models.FloatField(null=True, blank=True,verbose_name=_('lat')) ##history = HistoricalRecords() class Meta: verbose_name = _("place") verbose_name_plural = _("places") ordering = ('name', 'address', 'city') db_table = 'base_place' def __unicode__(self): rv = u"" if self.name: rv += self.name + u" - " if self.address: rv += self.address + u", " if self.zipcode: rv += u"%s " % self.zipcode rv += self.city.lower().capitalize() if self.province: rv += u" (%s)" % self.province.upper() return rv # fetch coords from open street map def update_coords(self): addressString = self.zipcode + ' ' + self.city + ' ' + self.province + ' ' + self.address location = geocoder.osm(addressString) if location.status == 'OK': self.lon = location.lng self.lat = location.lat def clean(self): self.name = self.name.strip().lower().capitalize() self.address = self.address.strip().lower().capitalize() #TODO: we should compute city and province starting from zipcode using local_flavor in forms self.city = self.city.lower().capitalize() self.province = self.province.upper() self.zipcode = self.zipcode.strip() if self.zipcode: if settings.VALIDATE_NUMERICAL_ZIPCODES: try: int(self.zipcode) except ValueError: raise ValidationError(_("Wrong ZIP CODE provided")) self.description = self.description.strip() return super(Place, self).clean() def save(self, *args, **kw): #TODO: Copy-on-write model # a) check if an already existent place with the same full address exist and in that case force update # b) if we are updating a Place --> detach it from other stuff pointing to it and clone super(Place, self).save(*args, **kw) #----------------- Authorization API ------------------------# # Table-level CREATE permission @classmethod def can_create(cls, user, context): # Who can create a new Place in a DES ? # Everyone belongs to the DES try: des = context['site'] except KeyError: raise WrongPermissionCheck('CREATE', cls, context) else: # It's ok because only one DES is supported return not user.is_anonymous() # otherwhise it should be # return user in User.objects.filter(person__in=des.persons) # Row-level EDIT permission def can_edit(self, user, context): # Who can edit details of an existing place in a DES ? # (note that places can be shared among GASs) # * DES administrators # * User that created the place # * User who has updated it. How he can do it? # If a User try to create a new place with the same parameters # of an already existent one, he updates the place allowed_users = self.des.admins | self.created_by | self.updaters return user in allowed_users # Row-level DELETE permission def can_delete(self, user, context): # Who can delete an existing place from a DES ? # (note that places can be shared among GASs) # * DES administrators # * User that created the place # * User who has updated it. How he can do it? 
see can_edit above allowed_users = self.des.admins | self.created_by | self.updaters return user in allowed_users #-----------------------------------------------------# display_fields = ( name, description, address, zipcode, city, province ) # Generic workflow management class DefaultTransition(models.Model, PermissionResource): workflow = models.ForeignKey(Workflow, related_name="default_transition_set",verbose_name=_('workflow')) state = models.ForeignKey(State,verbose_name=_('state')) transition = models.ForeignKey(Transition,verbose_name=_('transition')) class Meta: verbose_name = _("default transition") verbose_name_plural = _("default transitions") db_table = 'base_defaulttransition' class WorkflowDefinition(object): """ This class encapsulates all the data and logic needed to create and setup a Workflow (as in the `django-workflows` app), including creation of States and Transitions, assignment of Transitions to States and specification of the initial state and the default Transition for each State. To setup a new Workflow, just specify the needed data in the declarative format described below, then call the `register_workflow` method. ## TODO: workflow declaration's specs go here. """ def __init__(self, workflow_name, state_list, transition_list, state_transition_map, initial_state, default_transitions): # stash the workflow specs for later use self.workflow_name = workflow_name self.state_list = state_list self.transition_list = transition_list self.state_transition_map = state_transition_map self.initial_state_name = initial_state self.default_transitions = default_transitions def register_workflow(self): # check workflow specifications for internal consistency; # return an informative error message to the user if the check fails try: self.check_workflow_specs() except ImproperlyConfigured, e: raise ImproperlyConfigured(_("Workflow specifications are not consistent.\n %s") % e) try: # Check for already existent workflow. Operation `register_workflow` is idempotent... 
Workflow.objects.get(name=self.workflow_name) except Workflow.DoesNotExist: # Initialize workflow self.workflow = Workflow.objects.create(name=self.workflow_name) ## create States objects self.states = {} # dictionary containing State objects for our Workflow for (key, name) in self.state_list: self.states[key] = State.objects.create(name=name, workflow=self.workflow) ## create Transition objects self.transitions = {} # dictionary containing Transition objects for the current Workflow for (key, transition_name, destination_name) in self.transition_list: dest_state = self.states[destination_name] self.transitions[key] = Transition.objects.create(name=transition_name, workflow=self.workflow, destination=dest_state) ## associate Transitions to States for (state_name, transition_name) in self.state_transition_map: log.debug("Workflow %(w)s, adding state=%(s)s transition=%(t)s" % { 'w' : self.workflow_name, 's' : state_name, 't' : transition_name, }) state = self.states[state_name] transition = self.transitions[transition_name] state.transitions.add(transition) ## set the initial State for the Workflow state = self.states[self.initial_state_name] self.workflow.initial_state = state self.workflow.save() ## define default Transitions for States in a Workflow, ## so we can suggest to end-users what the next "logical" State could be for (state_name, transition_name) in self.default_transitions: state = self.states[state_name] transition = self.transitions[transition_name] self.workflow.default_transition_set.add(DefaultTransition(state=state, transition=transition)) def check_workflow_specs(self): """Check the provided workflow specifications for internal consistency. Return True if the specs are fine, False otherwise. """ state_names = [key for (key, name) in self.state_list] transition_names = [key for (key, transition_name, destination_name) in self.transition_list] ## States have to be unique # TODO ## Transitions have to be unique # TODO ## a Transition must point to an existing State for (key, transition_name, destination_name) in self.transition_list: if destination_name not in state_names: raise ImproperlyConfigured("Transition %s points to the non-existent State %s" % (key, destination_name)) ## a Transition must be assigned to an existing State for (state_name, transition_name) in self.state_transition_map: if state_name not in state_names: raise ImproperlyConfigured("Transition %s can't be assigned to the non-existent State %s" % (transition_name, state_name)) ## initial State must exists if self.initial_state_name not in state_names: raise ImproperlyConfigured("Workflow %s: initial state %s must be included in state names %s" % (self.workflow_name, self.initial_state_name, state_names)) ## a default Transition for a State must exists and had to be previously assigned to that State for (state_name, transition_name) in self.default_transitions: if state_name not in state_names: raise ImproperlyConfigured("A default Transition can't be defined for the non-existent State %s" % state_name) elif transition_name not in transition_names: raise ImproperlyConfigured("The default Transition for the State %s can't be set to a non-existent Transitions %s" % (state_name, transition_name)) elif (state_name, transition_name) not in self.state_transition_map: raise ImproperlyConfigured("The default Transition for the State %s must be one of its valid Transitions" % state_name) #------------------------------------------------------------------------------- #This is an HACK used just because we need 
these users use parts of the web admin interface from consts import GAS_MEMBER , GAS_REFERRER_TECH, SUPPLIER_REFERRER from django.contrib.auth.models import Group, Permission # groups for users GROUP_TECHS = "techs" GROUP_SUPPLIERS = "suppliers" GROUP_REFERRER_SUPPLIERS = "gas_referrer_suppliers" GROUP_USERS = "users" GROUP_MEMBERS = "gasmembers" def init_perms_for_groups(): from gf.base.models import Person, Place, Contact from gf.gas.models import GAS, GASConfig, GASMember from gf.supplier.models import ( SupplierConfig, SupplierProductCategory, ProductCategory, SupplierStock, Product, Supplier ) from django.contrib.auth.models import User from django.contrib.auth import get_permission_codename g_techs = Group.objects.get(name=GROUP_TECHS) g_suppliers = Group.objects.get(name=GROUP_SUPPLIERS) g_referrers_suppliers = Group.objects.get(name=GROUP_REFERRER_SUPPLIERS) g_gasmembers = Group.objects.get(name=GROUP_MEMBERS) techs_perms_d = { Person : ('add', 'change', 'delete'), Place : ('add', 'change', 'delete'), Contact : ('add', 'change', 'delete'), GAS : ('change',), GASConfig : ('change',), SupplierConfig : ('change',), GASMember : ('add', 'change', 'delete'), SupplierProductCategory : ('add', 'change', 'delete'), ProductCategory : ('add', 'change', 'delete'), SupplierStock : ('add', 'change', 'delete'), Product : ('add', 'change', 'delete'), Supplier : ('add', 'change'), User : ('add', 'change',), # add User is important for Add GASMember Form! Leave it here now. TODO } supplier_perms_d = { Person : ('add', 'change'), Place : ('add', 'change'), Contact : ('add', 'change'), SupplierConfig : ('change',), SupplierProductCategory : ('add', 'change', 'delete'), SupplierStock : ('add', 'change', 'delete'), Product : ('add', 'change', 'delete'), Supplier : ('change',), } gas_referrer_supplier_perms_d = supplier_perms_d.copy() gas_referrer_supplier_perms_d.update({ Supplier : ('add', 'change'), }) gm_perms_d = { Person : ('change',), Place : ('add', 'change',), Contact : ('add', 'change',), } group_perms_d_tuples = ( (g_techs , techs_perms_d), (g_suppliers , supplier_perms_d), (g_referrers_suppliers , gas_referrer_supplier_perms_d), (g_gasmembers , gm_perms_d), ) for gr, perms_d in group_perms_d_tuples: for klass, actions in perms_d.items(): ctype = ContentType.objects.get_for_model(klass) for action in actions: codename = get_permission_codename(action, klass._meta) log.debug("Adding perm %s to group %s" % (codename, gr)) p = Permission.objects.get( content_type=ctype, codename=codename ) gr.permissions.add(p) def setup_data_handler(sender, instance, created, **kwargs): """ Ovverride temporarly for associating some groups to users This will be in use until some part of the interface use admin-interface. After this can be removed """ if created: # Check that groups exist. Create them the first time g_techs, created = Group.objects.get_or_create(name=GROUP_TECHS) g_suppliers, created = Group.objects.get_or_create(name=GROUP_SUPPLIERS) g_referrers_suppliers, created = Group.objects.get_or_create(name=GROUP_REFERRER_SUPPLIERS) g_gasmembers, created = Group.objects.get_or_create(name=GROUP_MEMBERS) if created: # Create all groups needed for this hack # Check only last... 
            init_perms_for_groups()

        role_group_map = {
            GAS_MEMBER : g_gasmembers,
            GAS_REFERRER_SUPPLIER : g_referrers_suppliers,
            SUPPLIER_REFERRER : g_suppliers,
            GAS_REFERRER_TECH : g_techs,
        }

        # Set "is_staff" to access the admin interface
        instance.user.is_staff = True
        instance.user.save()

        role_name = instance.role.role.name
        group = role_group_map.get(role_name)
        if group:
            try:
                instance.user.groups.add(group)
            except KeyError:
                log.debug("%s create cannot add %s's group %s(%s)" % (
                    role_name, group, instance, instance.pk
                ))

# END hack
#-------------------------------------------------------------------------------

def validate(sender, instance, **kwargs):
    try:
        # `instance` is the model instance that has just been created
        instance.clean()
    except AttributeError:
        # sender model doesn't specify any sanitize operations, so just ignore the signal
        pass

def setup_data(sender, instance, created, **kwargs):
    """
    Setup proper data after a model instance is saved to the DB for the first time.

    This function just calls the `setup_data()` instance method of the sender
    model class (if defined); actual role-creation/setup logic is encapsulated there.
    """
    if created: # Automatic data-setup should happen only at instance-creation time
        try:
            # `instance` is the model instance that has just been created
            instance.setup_data()
        except AttributeError:
            # sender model doesn't specify any data-related setup operations, so just ignore the signal
            pass

# add `validate` function as a listener to the `pre_save` signal
pre_save.connect(validate)
# add `setup_data` function as a listener to the `post_save` signal
post_save.connect(setup_data)

post_save.connect(setup_data_handler, sender=PrincipalParamRoleRelation)
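For context, a small sketch of how Django derives the permission codenames that init_perms_for_groups() looks up; the stand-in options object below is hypothetical, but get_permission_codename() itself is the helper the file imports:

from django.contrib.auth import get_permission_codename

class FakeOpts(object):
    # invented stand-in for SomeModel._meta
    model_name = 'person'

for action in ('add', 'change', 'delete'):
    # yields 'add_person', 'change_person', 'delete_person'
    print(get_permission_codename(action, FakeOpts))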
befair/gasistafelice
gasistafelice/gf/base/models.py
Python
agpl-3.0
48,753
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#

import netsvc

from osv import fields, osv

import logging
_logger = logging.getLogger(__name__)


class department_selection(osv.osv_memory):

    _name = 'hr.schedule.validate.departments'
    _description = 'Department Selection for Validation'

    _columns = {
        'department_ids': fields.many2many('hr.department',
                                           'hr_department_group_rel',
                                           'employee_id',
                                           'department_id',
                                           'Departments'),
    }

    def view_schedules(self, cr, uid, ids, context=None):
        data = self.read(cr, uid, ids, context=context)[0]
        return {
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'hr.schedule',
            'domain': [('department_id', 'in', data['department_ids']),
                       ('state', 'in', ['draft'])],
            'type': 'ir.actions.act_window',
            'target': 'new',
            'nodestroy': True,
            'context': context,
        }

    def do_validate(self, cr, uid, ids, context=None):
        wkf_service = netsvc.LocalService('workflow')
        data = self.read(cr, uid, ids, context=context)[0]
        sched_ids = self.pool.get('hr.schedule').search(
            cr, uid, [('department_id', 'in', data['department_ids'])],
            context=context)
        for sched_id in sched_ids:
            wkf_service.trg_validate(
                uid, 'hr.schedule', sched_id, 'signal_validate', cr)

        return {'type': 'ir.actions.act_window_close'}
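For reference, a sketch of the domain filter that view_schedules() returns: OpenERP domains are lists of (field, operator, value) triples that are implicitly AND-ed, so with hypothetical department ids the action window shows only draft schedules in those departments:

department_ids = [1, 4, 7]  # invented stand-in for the wizard's selection
domain = [('department_id', 'in', department_ids),
          ('state', 'in', ['draft'])]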
bwrsandman/openerp-hr
hr_schedule/wizard/validate_schedule.py
Python
agpl-3.0
2,390
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PySingledispatch(PythonPackage):
    """This library brings functools.singledispatch to Python 2.6-3.3."""

    homepage = "https://pypi.python.org/pypi/singledispatch"
    url = "https://pypi.io/packages/source/s/singledispatch/singledispatch-3.4.0.3.tar.gz"

    version('3.4.0.3', sha256='5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c')

    depends_on('py-setuptools', type='build')
    depends_on('py-six', type=('build', 'run'))
    depends_on('py-ordereddict', when="^python@:2.6", type=('build', 'run'))
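For context, a minimal example of the functools.singledispatch API that this package backports, shown here with the Python 3 standard-library version:

from functools import singledispatch

@singledispatch
def describe(arg):
    return "something else"

@describe.register(int)
def _(arg):
    return "an int"

assert describe(3) == "an int"
assert describe("x") == "something else"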
iulian787/spack
var/spack/repos/builtin/packages/py-singledispatch/package.py
Python
lgpl-2.1
762
import logging
from datetime import datetime, timedelta

from rapidsms.message import Message
from rapidsms.i18n import ugettext_noop as _
from logger.models import IncomingMessage

from weltel.models import Site, Nurse, Patient, PatientState, EventLog
from weltel.models import UNSUBSCRIBE_CODE, INACTIVE_CODE

######################
# Callback Functions #
######################

def send_mambo(router, patient_id):
    response = "Mambo?"
    logging.info("Sending: %s" % response)
    connection = Patient.objects.get(id=patient_id).connection
    be = router.get_backend(connection.backend.slug)
    be.message(connection.identity, response).send()

def shida_report(router, nurse=None):
    # list of 'shida' patients for each site, or for a specific nurse if given
    sites = nurse.sites.all() if nurse is not None else Site.objects.all()
    for site in sites:
        shida = PatientState.objects.get(code='shida')
        # get all active patients who responded shida or are in the default state
        patients = Patient.objects.filter(site=site).filter(state=shida).exclude(active=False).exclude(subscribed=False)
        # generate report
        report = ''
        for patient in patients:
            # take the last '1' from Patient ID BA3-2-1
            id = patient.patient_id.rsplit('-',1)[-1]
            if hasattr(patient,'connection') and patient.connection is not None:
                report = report + "%(id)s-%(identity)s " % \
                         {'id':id, 'identity': patient.connection.identity}
            else:
                report = report + "%(id)s-None " % {'id':id }
        # send report to given nurse, or if nurse not supplied,
        # all nurses registered for that site
        nurses = [nurse] if nurse is not None else Nurse.objects.filter(sites=site).filter(subscribed=True)
        for n in nurses:
            be = router.get_backend(n.connection.backend.slug)
            if report:
                be.message(n.connection.identity, report).send()
            else:
                be.message(n.connection.identity, _("No problem patients")).send()

def mark_inactive(router, timeout_weeks):
    timeout_interval = timedelta(weeks=timeout_weeks)
    timeout = datetime.now() - timeout_interval
    # check if patients have not been seen in a while
    patients = Patient.objects.all()
    active = patients.filter(active=True).filter(subscribed=True)
    for patient in active:
        active = False
        # check a) no messages in X time from any of their connections
        for conn in patient.connections.all():
            try:
                log = IncomingMessage.objects.latest()
            except IncomingMessage.DoesNotExist:
                if patient.date_registered > timeout:
                    active = True
                # no messages received yet. TODO: check for the signup date of the patient.
                # once we start logging sign-up dates, that is.
            else:
                if log.received > timeout:
                    active = True
                continue
        # check b) no status updates from nurse
        try:
            last_touched = EventLog.objects.filter(patient=patient).latest()
        except EventLog.DoesNotExist:
            pass
        else:
            if last_touched.date > timeout:
                active = True
        if active == False:
            patient.register_event(INACTIVE_CODE)
            patient.active = False
            patient.save()
    return

def other_report(router, nurse=None):
    # list of 'inactive' and unsubscribed patients for each site
    sawa = PatientState.objects.get(code='sawa')
    timeout_interval = timedelta(days=1)
    timeout = datetime.now() - timeout_interval
    sites = nurse.sites.all() if nurse is not None else Site.objects.all()
    for site in sites:
        report = ''
        # get all active patients who unsubscribed today
        report_unsubscribed = ''
        unsubscribed = Patient.objects.filter(site=site).filter(active=True).filter(subscribed=False)
        for p in unsubscribed:
            unsubscribe_event = EventLog.objects.filter(patient=p).filter(event__code=UNSUBSCRIBE_CODE).latest()
            if not unsubscribe_event:
                logging.error("Patient is unsubscribed without unsubscribe event!")
            elif unsubscribe_event.date > timeout:
                id = p.patient_id.rsplit('-',1)[-1]
                if hasattr(p, 'connection') and p.connection is not None:
                    report_unsubscribed = report_unsubscribed + "%s-%s " % \
                                          (id, p.connection.identity)
                else:
                    report_unsubscribed = report_unsubscribed + "%s-None " % (id)
        # get patients who were marked 'inactive' today
        report_inactive = ''
        inactive = Patient.objects.filter(site=site).filter(active=False)
        for p in inactive:
            inactivated_event = EventLog.objects.filter(patient=p).filter(event__code=INACTIVE_CODE).latest()
            if not inactivated_event:
                logging.error("Patient is inactivated without inactivate event!")
            elif inactivated_event.date > timeout:
                id = p.patient_id.rsplit('-',1)[-1]
                if hasattr(p, 'connection') and p.connection is not None:
                    report_inactive = report_inactive + "%s-%s " % (id, \
                                      p.connection.identity)
                else:
                    report_inactive = report_inactive + "%s-None " % (id)
        if report_unsubscribed:
            report = report + "Unsubscribed: " + report_unsubscribed
        if report_inactive:
            report = report + "Inactive: " + report_inactive
        # send report to all nurses registered for that site
        nurses = [nurse] if nurse is not None else Nurse.objects.filter(sites=site).filter(subscribed=True)
        for n in nurses:
            be = router.get_backend(n.connection.backend.slug)
            if report:
                be.message(n.connection.identity, report).send()
            else:
                be.message(n.connection.identity, _("No patients unsubscribed or were marked inactive today.")).send()
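A quick sketch of the one-day reporting window used by other_report() above: an event is included when its timestamp falls inside the last 24 hours (the helper name below is invented for illustration):

from datetime import datetime, timedelta

timeout = datetime.now() - timedelta(days=1)

def happened_today(event_date):
    return event_date > timeout

assert happened_today(datetime.now() - timedelta(hours=2))
assert not happened_today(datetime.now() - timedelta(days=3))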
commtrack/temp-rapidsms
apps/weltel/callbacks.py
Python
lgpl-3.0
6,301
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A loc indexer for pandas-on-Spark DataFrame/Series. """ from abc import ABCMeta, abstractmethod from collections.abc import Iterable from functools import reduce from typing import Any, Optional, List, Tuple, TYPE_CHECKING, Union, cast, Sized import pandas as pd from pandas.api.types import is_list_like from pyspark.sql import functions as F, Column from pyspark.sql.types import BooleanType, LongType from pyspark.sql.utils import AnalysisException import numpy as np from pyspark import pandas as ps # noqa: F401 from pyspark.pandas._typing import Label, Name, Scalar from pyspark.pandas.internal import ( InternalField, InternalFrame, NATURAL_ORDER_COLUMN_NAME, SPARK_DEFAULT_SERIES_NAME, ) from pyspark.pandas.exceptions import SparkPandasIndexingError, SparkPandasNotImplementedError from pyspark.pandas.spark import functions as SF from pyspark.pandas.utils import ( is_name_like_tuple, is_name_like_value, lazy_property, name_like_string, same_anchor, scol_for, spark_column_equals, verify_temp_column_name, ) if TYPE_CHECKING: from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943) from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943) class IndexerLike(object): def __init__(self, psdf_or_psser: Union["Series", "DataFrame"]): from pyspark.pandas.frame import DataFrame from pyspark.pandas.series import Series assert isinstance( psdf_or_psser, (DataFrame, Series) ), "unexpected argument type: {}".format(type(psdf_or_psser)) self._psdf_or_psser = psdf_or_psser @property def _is_df(self) -> bool: from pyspark.pandas.frame import DataFrame return isinstance(self._psdf_or_psser, DataFrame) @property def _is_series(self) -> bool: from pyspark.pandas.series import Series return isinstance(self._psdf_or_psser, Series) @property def _psdf(self) -> "DataFrame": if self._is_df: return cast("DataFrame", self._psdf_or_psser) else: assert self._is_series return self._psdf_or_psser._psdf @property def _internal(self) -> InternalFrame: return self._psdf._internal class AtIndexer(IndexerLike): """ Access a single value for a row/column label pair. If the index is not unique, all matching pairs are returned as an array. Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to get a single value in a DataFrame or Series. .. note:: Unlike pandas, pandas-on-Spark only allows using ``at`` to get values but not to set them. .. note:: Warning: If ``row_index`` matches a lot of rows, large amounts of data will be fetched, potentially causing your machine to run out of memory. Raises ------ KeyError When label does not exist in DataFrame Examples -------- >>> psdf = ps.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... 
index=[4, 5, 5], columns=['A', 'B', 'C']) >>> psdf A B C 4 0 2 3 5 0 4 1 5 10 20 30 Get value at specified row/column pair >>> psdf.at[4, 'B'] 2 Get array if an index occurs multiple times >>> psdf.at[5, 'B'] array([ 4, 20]) """ def __getitem__(self, key: Any) -> Union["Series", "DataFrame", Scalar]: if self._is_df: if not isinstance(key, tuple) or len(key) != 2: raise TypeError("Use DataFrame.at like .at[row_index, column_name]") row_sel, col_sel = key else: assert self._is_series, type(self._psdf_or_psser) if isinstance(key, tuple) and len(key) != 1: raise TypeError("Use Series.at like .at[row_index]") row_sel = key col_sel = self._psdf_or_psser._column_label if self._internal.index_level == 1: if not is_name_like_value(row_sel, allow_none=False, allow_tuple=False): raise ValueError("At based indexing on a single index can only have a single value") row_sel = (row_sel,) else: if not is_name_like_tuple(row_sel, allow_none=False): raise ValueError("At based indexing on multi-index can only have tuple values") if col_sel is not None: if not is_name_like_value(col_sel, allow_none=False): raise ValueError("At based indexing on multi-index can only have tuple values") if not is_name_like_tuple(col_sel): col_sel = (col_sel,) cond = reduce( lambda x, y: x & y, [scol == row for scol, row in zip(self._internal.index_spark_columns, row_sel)], ) pdf = ( self._internal.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME) .filter(cond) .select(self._internal.spark_column_for(col_sel)) .toPandas() ) if len(pdf) < 1: raise KeyError(name_like_string(row_sel)) values = cast(pd.DataFrame, pdf).iloc[:, 0].values return ( values if (len(row_sel) < self._internal.index_level or len(values) > 1) else values[0] ) class iAtIndexer(IndexerLike): """ Access a single value for a row/column pair by integer position. Similar to ``iloc``, in that both provide integer-based lookups. Use ``iat`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ KeyError When label does not exist in DataFrame Examples -------- >>> df = ps.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... columns=['A', 'B', 'C']) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] 1 Get value within a series >>> psser = ps.Series([1, 2, 3], index=[10, 20, 30]) >>> psser 10 1 20 2 30 3 dtype: int64 >>> psser.iat[1] 2 """ def __getitem__(self, key: Any) -> Union["Series", "DataFrame", Scalar]: if self._is_df: if not isinstance(key, tuple) or len(key) != 2: raise TypeError( "Use DataFrame.iat like .iat[row_integer_position, column_integer_position]" ) row_sel, col_sel = key if not isinstance(row_sel, int) or not isinstance(col_sel, int): raise ValueError("iAt based indexing can only have integer indexers") return self._psdf_or_psser.iloc[row_sel, col_sel] else: assert self._is_series, type(self._psdf_or_psser) if not isinstance(key, int) and len(key) != 1: raise TypeError("Use Series.iat like .iat[row_integer_position]") if not isinstance(key, int): raise ValueError("iAt based indexing can only have integer indexers") return self._psdf_or_psser.iloc[key] class LocIndexerLike(IndexerLike, metaclass=ABCMeta): def _select_rows(self, rows_sel: Any) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """ Dispatch the logic for select rows to more specific methods by `rows_sel` argument types. Parameters ---------- rows_sel : the key specified to select rows. Returns ------- Tuple of Spark column, int, int: * The Spark column for the condition to filter the rows. 
* The number of rows when the selection can be simplified by limit. * The remaining index rows if the result index size is shrunk. """ from pyspark.pandas.series import Series if rows_sel is None: return None, None, None elif isinstance(rows_sel, Series): return self._select_rows_by_series(rows_sel) elif isinstance(rows_sel, Column): return self._select_rows_by_spark_column(rows_sel) elif isinstance(rows_sel, slice): if rows_sel == slice(None): # If slice is None - select everything, so nothing to do return None, None, None return self._select_rows_by_slice(rows_sel) elif isinstance(rows_sel, tuple): return self._select_rows_else(rows_sel) elif is_list_like(rows_sel): return self._select_rows_by_iterable(rows_sel) else: return self._select_rows_else(rows_sel) def _select_cols( self, cols_sel: Any, missing_keys: Optional[List[Name]] = None ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """ Dispatch the logic for select columns to more specific methods by `cols_sel` argument types. Parameters ---------- cols_sel : the key specified to select columns. Returns ------- Tuple of list of column label, list of Spark columns, list of dtypes, bool: * The column labels selected. * The Spark columns selected. * The field metadata selected. * The boolean value whether Series should be returned or not. * The Series name if needed. """ from pyspark.pandas.series import Series if cols_sel is None: column_labels = self._internal.column_labels data_spark_columns = self._internal.data_spark_columns data_fields = self._internal.data_fields return column_labels, data_spark_columns, data_fields, False, None elif isinstance(cols_sel, Series): return self._select_cols_by_series(cols_sel, missing_keys) elif isinstance(cols_sel, Column): return self._select_cols_by_spark_column(cols_sel, missing_keys) elif isinstance(cols_sel, slice): if cols_sel == slice(None): # If slice is None - select everything, so nothing to do column_labels = self._internal.column_labels data_spark_columns = self._internal.data_spark_columns data_fields = self._internal.data_fields return column_labels, data_spark_columns, data_fields, False, None return self._select_cols_by_slice(cols_sel, missing_keys) elif isinstance(cols_sel, tuple): return self._select_cols_else(cols_sel, missing_keys) elif is_list_like(cols_sel): return self._select_cols_by_iterable(cols_sel, missing_keys) else: return self._select_cols_else(cols_sel, missing_keys) # Methods for row selection @abstractmethod def _select_rows_by_series( self, rows_sel: "Series" ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """Select rows by `Series` type key.""" pass @abstractmethod def _select_rows_by_spark_column( self, rows_sel: Column ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """Select rows by Spark `Column` type key.""" pass @abstractmethod def _select_rows_by_slice( self, rows_sel: slice ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """Select rows by `slice` type key.""" pass @abstractmethod def _select_rows_by_iterable( self, rows_sel: Iterable ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """Select rows by `Iterable` type key.""" pass @abstractmethod def _select_rows_else( self, rows_sel: Any ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: """Select rows by other type key.""" pass # Methods for col selection @abstractmethod def _select_cols_by_series( self, cols_sel: "Series", missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], 
Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """Select columns by `Series` type key.""" pass @abstractmethod def _select_cols_by_spark_column( self, cols_sel: Column, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """Select columns by Spark `Column` type key.""" pass @abstractmethod def _select_cols_by_slice( self, cols_sel: slice, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """Select columns by `slice` type key.""" pass @abstractmethod def _select_cols_by_iterable( self, cols_sel: Iterable, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """Select columns by `Iterable` type key.""" pass @abstractmethod def _select_cols_else( self, cols_sel: Any, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: """Select columns by other type key.""" pass def __getitem__(self, key: Any) -> Union["Series", "DataFrame"]: from pyspark.pandas.frame import DataFrame from pyspark.pandas.series import Series, first_series if self._is_series: if isinstance(key, Series) and not same_anchor(key, self._psdf_or_psser): psdf = self._psdf_or_psser.to_frame() temp_col = verify_temp_column_name(psdf, "__temp_col__") psdf[temp_col] = key return type(self)(psdf[self._psdf_or_psser.name])[psdf[temp_col]] cond, limit, remaining_index = self._select_rows(key) if cond is None and limit is None: return self._psdf_or_psser column_label = self._psdf_or_psser._column_label column_labels = [column_label] data_spark_columns = [self._internal.spark_column_for(column_label)] data_fields = [self._internal.field_for(column_label)] returns_series = True series_name = self._psdf_or_psser.name else: assert self._is_df if isinstance(key, tuple): if len(key) != 2: raise SparkPandasIndexingError("Only accepts pairs of candidates") rows_sel, cols_sel = key else: rows_sel = key cols_sel = None if isinstance(rows_sel, Series) and not same_anchor(rows_sel, self._psdf_or_psser): psdf = self._psdf_or_psser.copy() temp_col = verify_temp_column_name(cast("DataFrame", psdf), "__temp_col__") psdf[temp_col] = rows_sel return type(self)(psdf)[psdf[temp_col], cols_sel][list(self._psdf_or_psser.columns)] cond, limit, remaining_index = self._select_rows(rows_sel) ( column_labels, data_spark_columns, data_fields, returns_series, series_name, ) = self._select_cols(cols_sel) if cond is None and limit is None and returns_series: psser = self._psdf_or_psser._psser_for(column_labels[0]) if series_name is not None and series_name != psser.name: psser = psser.rename(series_name) return psser if remaining_index is not None: index_spark_columns = self._internal.index_spark_columns[-remaining_index:] index_names = self._internal.index_names[-remaining_index:] index_fields = self._internal.index_fields[-remaining_index:] else: index_spark_columns = self._internal.index_spark_columns index_names = self._internal.index_names index_fields = self._internal.index_fields if len(column_labels) > 0: column_labels = column_labels.copy() column_labels_level = max( len(label) if label is not None else 1 for label in column_labels ) none_column = 0 for i, label in enumerate(column_labels): if label is None: label = (none_column,) none_column += 1 if len(label) < column_labels_level: 
label = tuple(list(label) + ([""]) * (column_labels_level - len(label))) column_labels[i] = label if i == 0 and none_column == 1: column_labels = [None] column_label_names = self._internal.column_label_names[-column_labels_level:] else: column_label_names = self._internal.column_label_names try: sdf = self._internal.spark_frame if cond is not None: index_columns = sdf.select(index_spark_columns).columns data_columns = sdf.select(data_spark_columns).columns sdf = sdf.filter(cond).select(index_spark_columns + data_spark_columns) index_spark_columns = [scol_for(sdf, col) for col in index_columns] data_spark_columns = [scol_for(sdf, col) for col in data_columns] if limit is not None: if limit >= 0: sdf = sdf.limit(limit) else: sdf = sdf.limit(sdf.count() + limit) sdf = sdf.drop(NATURAL_ORDER_COLUMN_NAME) except AnalysisException: raise KeyError( "[{}] don't exist in columns".format( [col._jc.toString() for col in data_spark_columns] # type: ignore ) ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=column_labels, data_spark_columns=data_spark_columns, data_fields=data_fields, column_label_names=column_label_names, ) psdf = DataFrame(internal) if returns_series: psdf_or_psser = first_series(psdf) if series_name is not None and series_name != psdf_or_psser.name: psdf_or_psser = psdf_or_psser.rename(series_name) else: psdf_or_psser = psdf if remaining_index is not None and remaining_index == 0: pdf_or_pser = psdf_or_psser.head(2).to_pandas() length = len(pdf_or_pser) if length == 0: raise KeyError(name_like_string(key)) elif length == 1: return pdf_or_pser.iloc[0] else: return psdf_or_psser else: return psdf_or_psser def __setitem__(self, key: Any, value: Any) -> None: from pyspark.pandas.frame import DataFrame from pyspark.pandas.series import Series, first_series if self._is_series: if ( isinstance(key, Series) and (isinstance(self, iLocIndexer) or not same_anchor(key, self._psdf_or_psser)) ) or ( isinstance(value, Series) and (isinstance(self, iLocIndexer) or not same_anchor(value, self._psdf_or_psser)) ): if self._psdf_or_psser.name is None: psdf = self._psdf_or_psser.to_frame() column_label = psdf._internal.column_labels[0] else: psdf = self._psdf_or_psser._psdf.copy() column_label = self._psdf_or_psser._column_label temp_natural_order = verify_temp_column_name(psdf, "__temp_natural_order__") temp_key_col = verify_temp_column_name(psdf, "__temp_key_col__") temp_value_col = verify_temp_column_name(psdf, "__temp_value_col__") psdf[temp_natural_order] = F.monotonically_increasing_id() if isinstance(key, Series): psdf[temp_key_col] = key if isinstance(value, Series): psdf[temp_value_col] = value psdf = psdf.sort_values(temp_natural_order).drop(temp_natural_order) psser = psdf._psser_for(column_label) if isinstance(key, Series): key = F.col( "`{}`".format(psdf[temp_key_col]._internal.data_spark_column_names[0]) ) if isinstance(value, Series): value = F.col( "`{}`".format(psdf[temp_value_col]._internal.data_spark_column_names[0]) ) type(self)(psser)[key] = value if self._psdf_or_psser.name is None: psser = psser.rename() self._psdf_or_psser._psdf._update_internal_frame( psser._psdf[ self._psdf_or_psser._psdf._internal.column_labels ]._internal.resolved_copy, requires_same_anchor=False, ) return if isinstance(value, DataFrame): raise ValueError("Incompatible indexer with DataFrame") cond, limit, remaining_index = self._select_rows(key) if cond is None: cond = SF.lit(True) if limit is not None: cond 
= cond & ( self._internal.spark_frame[cast(iLocIndexer, self)._sequence_col] < SF.lit(limit) ) if isinstance(value, (Series, Column)): if remaining_index is not None and remaining_index == 0: raise ValueError( "No axis named {} for object type {}".format(key, type(value).__name__) ) if isinstance(value, Series): value = value.spark.column else: value = SF.lit(value) scol = ( F.when(cond, value) .otherwise(self._internal.spark_column_for(self._psdf_or_psser._column_label)) .alias(name_like_string(self._psdf_or_psser.name or SPARK_DEFAULT_SERIES_NAME)) ) internal = self._internal.with_new_spark_column( self._psdf_or_psser._column_label, scol # TODO: dtype? ) self._psdf_or_psser._psdf._update_internal_frame(internal, requires_same_anchor=False) else: assert self._is_df if isinstance(key, tuple): if len(key) != 2: raise SparkPandasIndexingError("Only accepts pairs of candidates") rows_sel, cols_sel = key else: rows_sel = key cols_sel = None if isinstance(value, DataFrame): if len(value.columns) == 1: value = first_series(value) else: raise ValueError("Only a dataframe with one column can be assigned") if ( isinstance(rows_sel, Series) and ( isinstance(self, iLocIndexer) or not same_anchor(rows_sel, self._psdf_or_psser) ) ) or ( isinstance(value, Series) and (isinstance(self, iLocIndexer) or not same_anchor(value, self._psdf_or_psser)) ): psdf = cast(DataFrame, self._psdf_or_psser.copy()) temp_natural_order = verify_temp_column_name(psdf, "__temp_natural_order__") temp_key_col = verify_temp_column_name(psdf, "__temp_key_col__") temp_value_col = verify_temp_column_name(psdf, "__temp_value_col__") psdf[temp_natural_order] = F.monotonically_increasing_id() if isinstance(rows_sel, Series): psdf[temp_key_col] = rows_sel if isinstance(value, Series): psdf[temp_value_col] = value psdf = psdf.sort_values(temp_natural_order).drop(temp_natural_order) if isinstance(rows_sel, Series): rows_sel = F.col( "`{}`".format(psdf[temp_key_col]._internal.data_spark_column_names[0]) ) if isinstance(value, Series): value = F.col( "`{}`".format(psdf[temp_value_col]._internal.data_spark_column_names[0]) ) type(self)(psdf)[rows_sel, cols_sel] = value self._psdf_or_psser._update_internal_frame( psdf[list(self._psdf_or_psser.columns)]._internal.resolved_copy, requires_same_anchor=False, ) return cond, limit, remaining_index = self._select_rows(rows_sel) missing_keys = [] # type: Optional[List[Name]] _, data_spark_columns, _, _, _ = self._select_cols(cols_sel, missing_keys=missing_keys) if cond is None: cond = SF.lit(True) if limit is not None: cond = cond & ( self._internal.spark_frame[cast(iLocIndexer, self)._sequence_col] < SF.lit(limit) ) if isinstance(value, (Series, Column)): if remaining_index is not None and remaining_index == 0: raise ValueError("Incompatible indexer with Series") if len(data_spark_columns) > 1: raise ValueError("shape mismatch") if isinstance(value, Series): value = value.spark.column else: value = SF.lit(value) new_data_spark_columns = [] new_fields = [] for new_scol, spark_column_name, new_field in zip( self._internal.data_spark_columns, self._internal.data_spark_column_names, self._internal.data_fields, ): for scol in data_spark_columns: if spark_column_equals(new_scol, scol): new_scol = F.when(cond, value).otherwise(scol).alias(spark_column_name) new_field = InternalField.from_struct_field( self._internal.spark_frame.select(new_scol).schema[0], use_extension_dtypes=new_field.is_extension_dtype, ) break new_data_spark_columns.append(new_scol) new_fields.append(new_field) column_labels = 
self._internal.column_labels.copy() for missing in missing_keys: if is_name_like_tuple(missing): label = cast(Label, missing) else: label = cast(Label, (missing,)) if len(label) < self._internal.column_labels_level: label = tuple( list(label) + ([""] * (self._internal.column_labels_level - len(label))) ) elif len(label) > self._internal.column_labels_level: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(label), self._internal.column_labels_level ) ) column_labels.append(label) new_data_spark_columns.append(F.when(cond, value).alias(name_like_string(label))) new_fields.append(None) internal = self._internal.with_new_columns( new_data_spark_columns, column_labels=column_labels, data_fields=new_fields ) self._psdf_or_psser._update_internal_frame(internal, requires_same_anchor=False) class LocIndexer(LocIndexerLike): """ Access a group of rows and columns by label(s) or a boolean Series. ``.loc[]`` is primarily label based, but may also be used with a conditional boolean Series derived from the DataFrame or Series. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index) for column selection. - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'``. - A conditional boolean Series derived from the DataFrame or Series - A boolean array of the same length as the column axis being sliced, e.g. ``[True, False, True]``. - An alignable boolean pandas Series to the column axis being sliced. The index of the key will be aligned before masking. Not allowed inputs which pandas allows are: - A boolean array of the same length as the row axis being sliced, e.g. ``[True, False, True]``. - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing (one of the above) .. note:: MultiIndex is not supported yet. .. note:: Note that contrary to usual python slices, **both** the start and the stop are included, and the step of the slice is not allowed. .. note:: With a list or array of labels for row selection, pandas-on-Spark behaves as a filter without reordering by the labels. See Also -------- Series.loc : Access group of values using labels. Examples -------- **Getting values** >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 Single label. Note this returns the row as a Series. >>> df.loc['viper'] max_speed 4 shield 5 Name: viper, dtype: int64 List of labels. Note using ``[[]]`` returns a DataFrame. Also note that pandas-on-Spark behaves just a filter without reordering by the labels. >>> df.loc[['viper', 'sidewinder']] max_speed shield viper 4 5 sidewinder 7 8 >>> df.loc[['sidewinder', 'viper']] max_speed shield viper 4 5 sidewinder 7 8 Single label for column. >>> df.loc['cobra', 'shield'] 2 List of labels for row. >>> df.loc[['cobra'], 'shield'] cobra 2 Name: shield, dtype: int64 List of labels for column. >>> df.loc['cobra', ['shield']] shield 2 Name: cobra, dtype: int64 List of labels for both row and column. >>> df.loc[['cobra'], ['shield']] shield cobra 2 Slice with labels for row and single label for column. As mentioned above, note that both the start and stop of the slice are included. 
>>> df.loc['cobra':'viper', 'max_speed'] cobra 1 viper 4 Name: max_speed, dtype: int64 Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] max_speed shield sidewinder 7 8 Conditional that returns a boolean Series with column labels specified >>> df.loc[df['shield'] > 6, ['max_speed']] max_speed sidewinder 7 A boolean array of the same length as the column axis being sliced. >>> df.loc[:, [False, True]] shield cobra 2 viper 5 sidewinder 8 An alignable boolean Series to the column axis being sliced. >>> df.loc[:, pd.Series([False, True], index=['max_speed', 'shield'])] shield cobra 2 viper 5 sidewinder 8 **Setting values** Setting value for all items matching the list of labels. >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 >>> df max_speed shield cobra 1 2 viper 4 50 sidewinder 7 50 Setting value for an entire row >>> df.loc['cobra'] = 10 >>> df max_speed shield cobra 10 10 viper 4 50 sidewinder 7 50 Set value for an entire column >>> df.loc[:, 'max_speed'] = 30 >>> df max_speed shield cobra 30 10 viper 30 50 sidewinder 30 50 Set value for an entire list of columns >>> df.loc[:, ['max_speed', 'shield']] = 100 >>> df max_speed shield cobra 100 100 viper 100 100 sidewinder 100 100 Set value with Series >>> df.loc[:, 'shield'] = df['shield'] * 2 >>> df max_speed shield cobra 100 200 viper 100 200 sidewinder 100 200 **Getting values on a DataFrame with an index that has integer labels** Another example using integers for the index >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=[7, 8, 9], ... columns=['max_speed', 'shield']) >>> df max_speed shield 7 1 2 8 4 5 9 7 8 Slice with integer labels for rows. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc[7:9] max_speed shield 7 1 2 8 4 5 9 7 8 """ @staticmethod def _NotImplemented(description: str) -> SparkPandasNotImplementedError: return SparkPandasNotImplementedError( description=description, pandas_function=".loc[..., ...]", spark_target_function="select, where", ) def _select_rows_by_series( self, rows_sel: "Series" ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: assert isinstance(rows_sel.spark.data_type, BooleanType), rows_sel.spark.data_type return rows_sel.spark.column, None, None def _select_rows_by_spark_column( self, rows_sel: Column ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: spark_type = self._internal.spark_frame.select(rows_sel).schema[0].dataType assert isinstance(spark_type, BooleanType), spark_type return rows_sel, None, None def _select_rows_by_slice( self, rows_sel: slice ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: from pyspark.pandas.indexes import MultiIndex if rows_sel.step is not None: raise LocIndexer._NotImplemented("Cannot use step with Spark.") elif self._internal.index_level == 1: sdf = self._internal.spark_frame index = self._psdf_or_psser.index index_column = index.to_series() index_data_type = index_column.spark.data_type start = rows_sel.start stop = rows_sel.stop # get natural order from '__natural_order__' from start to stop # to keep natural order. 
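            # Collect the '__natural_order__' values of the rows matching
            # either slice endpoint, then translate the label-based slice
            # into a range filter on that natural-order column (with the
            # monotonicity fallbacks below when an endpoint is missing).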
start_and_stop = ( sdf.select(index_column.spark.column, NATURAL_ORDER_COLUMN_NAME) .where( (index_column.spark.column == SF.lit(start).cast(index_data_type)) | (index_column.spark.column == SF.lit(stop).cast(index_data_type)) ) .collect() ) start = [row[1] for row in start_and_stop if row[0] == start] start = start[0] if len(start) > 0 else None stop = [row[1] for row in start_and_stop if row[0] == stop] stop = stop[-1] if len(stop) > 0 else None conds = [] # type: List[Column] if start is not None: conds.append(F.col(NATURAL_ORDER_COLUMN_NAME) >= SF.lit(start).cast(LongType())) if stop is not None: conds.append(F.col(NATURAL_ORDER_COLUMN_NAME) <= SF.lit(stop).cast(LongType())) # if index order is not monotonic increasing or decreasing # and specified values don't exist in index, raise KeyError if (start is None and rows_sel.start is not None) or ( stop is None and rows_sel.stop is not None ): inc = index_column.is_monotonic_increasing if inc is False: dec = index_column.is_monotonic_decreasing if start is None and rows_sel.start is not None: start = rows_sel.start if inc is not False: conds.append( index_column.spark.column >= SF.lit(start).cast(index_data_type) ) elif dec is not False: conds.append( index_column.spark.column <= SF.lit(start).cast(index_data_type) ) else: raise KeyError(rows_sel.start) if stop is None and rows_sel.stop is not None: stop = rows_sel.stop if inc is not False: conds.append( index_column.spark.column <= SF.lit(stop).cast(index_data_type) ) elif dec is not False: conds.append( index_column.spark.column >= SF.lit(stop).cast(index_data_type) ) else: raise KeyError(rows_sel.stop) return reduce(lambda x, y: x & y, conds), None, None else: index = self._psdf_or_psser.index index_data_type = [f.dataType for f in index.to_series().spark.data_type] start = rows_sel.start if start is not None: if not isinstance(start, tuple): start = (start,) if len(start) == 0: start = None stop = rows_sel.stop if stop is not None: if not isinstance(stop, tuple): stop = (stop,) if len(stop) == 0: stop = None depth = max( len(start) if start is not None else 0, len(stop) if stop is not None else 0 ) if depth == 0: return None, None, None elif ( depth > self._internal.index_level or not index.droplevel(list(range(self._internal.index_level)[depth:])).is_monotonic ): raise KeyError( "Key length ({}) was greater than MultiIndex sort depth".format(depth) ) conds = [] if start is not None: cond = SF.lit(True) for scol, value, dt in list( zip(self._internal.index_spark_columns, start, index_data_type) )[::-1]: compare = MultiIndex._comparator_for_monotonic_increasing(dt) cond = F.when(scol.eqNullSafe(SF.lit(value).cast(dt)), cond).otherwise( compare(scol, SF.lit(value).cast(dt), Column.__gt__) ) conds.append(cond) if stop is not None: cond = SF.lit(True) for scol, value, dt in list( zip(self._internal.index_spark_columns, stop, index_data_type) )[::-1]: compare = MultiIndex._comparator_for_monotonic_increasing(dt) cond = F.when(scol.eqNullSafe(SF.lit(value).cast(dt)), cond).otherwise( compare(scol, SF.lit(value).cast(dt), Column.__lt__) ) conds.append(cond) return reduce(lambda x, y: x & y, conds), None, None def _select_rows_by_iterable( self, rows_sel: Iterable ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: rows_sel = list(rows_sel) if len(rows_sel) == 0: return SF.lit(False), None, None elif self._internal.index_level == 1: index_column = self._psdf_or_psser.index.to_series() index_data_type = index_column.spark.data_type if len(rows_sel) == 1: return ( 
index_column.spark.column == SF.lit(rows_sel[0]).cast(index_data_type), None, None, ) else: return ( index_column.spark.column.isin( [SF.lit(r).cast(index_data_type) for r in rows_sel] ), None, None, ) else: raise LocIndexer._NotImplemented("Cannot select with MultiIndex with Spark.") def _select_rows_else( self, rows_sel: Any ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: if not isinstance(rows_sel, tuple): rows_sel = (rows_sel,) if len(rows_sel) > self._internal.index_level: raise SparkPandasIndexingError("Too many indexers") rows = [scol == value for scol, value in zip(self._internal.index_spark_columns, rows_sel)] return ( reduce(lambda x, y: x & y, rows), None, self._internal.index_level - len(rows_sel), ) def _get_from_multiindex_column( self, key: Optional[Label], missing_keys: Optional[List[Name]], labels: Optional[List[Tuple[Label, Label]]] = None, recursed: int = 0, ) -> Tuple[List[Label], Optional[List[Column]], List[InternalField], bool, Optional[Name]]: """Select columns from multi-index columns.""" assert isinstance(key, tuple) if labels is None: labels = [(label, label) for label in self._internal.column_labels] for k in key: labels = [ (label, None if lbl is None else lbl[1:]) for label, lbl in labels if (lbl is None and k is None) or (lbl is not None and lbl[0] == k) ] if len(labels) == 0: if missing_keys is None: raise KeyError(k) else: missing_keys.append(key) return [], [], [], False, None if all(lbl is not None and len(lbl) > 0 and lbl[0] == "" for _, lbl in labels): # If the head is '', drill down recursively. labels = [(label, tuple([str(key), *lbl[1:]])) for i, (label, lbl) in enumerate(labels)] return self._get_from_multiindex_column((str(key),), missing_keys, labels, recursed + 1) else: returns_series = all(lbl is None or len(lbl) == 0 for _, lbl in labels) if returns_series: label_set = set(label for label, _ in labels) assert len(label_set) == 1 label = list(label_set)[0] column_labels = [label] data_spark_columns = [self._internal.spark_column_for(label)] data_fields = [self._internal.field_for(label)] if label is None: series_name = None # type: Name else: if recursed > 0: label = label[:-recursed] series_name = label if len(label) > 1 else label[0] else: column_labels = [ None if lbl is None or lbl == (None,) else lbl for _, lbl in labels ] data_spark_columns = [self._internal.spark_column_for(label) for label, _ in labels] data_fields = [self._internal.field_for(label) for label, _ in labels] series_name = None return column_labels, data_spark_columns, data_fields, returns_series, series_name def _select_cols_by_series( self, cols_sel: "Series", missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: column_labels = cols_sel._internal.column_labels data_spark_columns = cols_sel._internal.data_spark_columns data_fields = cols_sel._internal.data_fields return column_labels, data_spark_columns, data_fields, True, None def _select_cols_by_spark_column( self, cols_sel: Column, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: column_labels = [ (self._internal.spark_frame.select(cols_sel).columns[0],) ] # type: List[Label] data_spark_columns = [cols_sel] return column_labels, data_spark_columns, None, True, None def _select_cols_by_slice( self, cols_sel: slice, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, 
Optional[Name], ]: start, stop = self._psdf_or_psser.columns.slice_locs( start=cols_sel.start, end=cols_sel.stop ) column_labels = self._internal.column_labels[start:stop] data_spark_columns = self._internal.data_spark_columns[start:stop] data_fields = self._internal.data_fields[start:stop] return column_labels, data_spark_columns, data_fields, False, None def _select_cols_by_iterable( self, cols_sel: Iterable, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: from pyspark.pandas.series import Series if all(isinstance(key, Series) for key in cols_sel): column_labels = [key._column_label for key in cols_sel] data_spark_columns = [key.spark.column for key in cols_sel] data_fields = [key._internal.data_fields[0] for key in cols_sel] elif all(isinstance(key, Column) for key in cols_sel): column_labels = [ (self._internal.spark_frame.select(col).columns[0],) for col in cols_sel ] data_spark_columns = list(cols_sel) data_fields = None elif all(isinstance(key, bool) for key in cols_sel) or all( isinstance(key, np.bool_) for key in cols_sel ): if len(cast(Sized, cols_sel)) != len(self._internal.column_labels): raise IndexError( "Boolean index has wrong length: %s instead of %s" % (len(cast(Sized, cols_sel)), len(self._internal.column_labels)) ) if isinstance(cols_sel, pd.Series): if not cols_sel.index.sort_values().equals(self._psdf.columns.sort_values()): raise SparkPandasIndexingError( "Unalignable boolean Series provided as indexer " "(index of the boolean Series and of the indexed object do not match)" ) else: column_labels = [ column_label for column_label in self._internal.column_labels if cols_sel[column_label if len(column_label) > 1 else column_label[0]] ] data_spark_columns = [ self._internal.spark_column_for(column_label) for column_label in column_labels ] data_fields = [ self._internal.field_for(column_label) for column_label in column_labels ] else: column_labels = [ self._internal.column_labels[i] for i, col in enumerate(cols_sel) if col ] data_spark_columns = [ self._internal.data_spark_columns[i] for i, col in enumerate(cols_sel) if col ] data_fields = [ self._internal.data_fields[i] for i, col in enumerate(cols_sel) if col ] elif any(isinstance(key, tuple) for key in cols_sel) and any( not is_name_like_tuple(key) for key in cols_sel ): raise TypeError( "Expected tuple, got {}".format( type(set(key for key in cols_sel if not is_name_like_tuple(key)).pop()) ) ) else: if missing_keys is None and all(isinstance(key, tuple) for key in cols_sel): level = self._internal.column_labels_level if any(len(key) != level for key in cols_sel): raise ValueError("All the key level should be the same as column index level.") column_labels = [] data_spark_columns = [] data_fields = [] for key in cols_sel: found = False for label in self._internal.column_labels: if label == key or label[0] == key: column_labels.append(label) data_spark_columns.append(self._internal.spark_column_for(label)) data_fields.append(self._internal.field_for(label)) found = True if not found: if missing_keys is None: raise KeyError("['{}'] not in index".format(name_like_string(key))) else: missing_keys.append(key) return column_labels, data_spark_columns, data_fields, False, None def _select_cols_else( self, cols_sel: Any, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: if not is_name_like_tuple(cols_sel): cols_sel = (cols_sel,) return 
self._get_from_multiindex_column(cols_sel, missing_keys) class iLocIndexer(LocIndexerLike): """ Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a conditional boolean Series. Allowed inputs are: - An integer for column selection, e.g. ``5``. - A list or array of integers for row selection with distinct index values, e.g. ``[3, 4, 0]`` - A list or array of integers for column selection, e.g. ``[4, 3, 0]``. - A boolean array for column selection. - A slice object with ints for row and column selection, e.g. ``1:7``. Not allowed inputs which pandas allows are: - A list or array of integers for row selection with duplicated indexes, e.g. ``[4, 4, 0]``. - A boolean array for row selection. - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See Also -------- DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = ps.DataFrame(mydict, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** A scalar integer for row selection. >>> df.iloc[1] a 100 b 200 c 300 d 400 Name: 1, dtype: int64 >>> df.iloc[[0]] a b c d 0 1 2 3 4 With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[:1, 1] 0 2 Name: b, dtype: int64 With lists of integers. >>> df.iloc[:2, [1, 3]] b d 0 2 4 1 200 400 With `slice` objects. >>> df.iloc[:2, 0:3] a b c 0 1 2 3 1 100 200 300 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 **Setting values** Setting value for all items matching the list of labels. >>> df.iloc[[1, 2], [1]] = 50 >>> df a b c d 0 1 2 3 4 1 100 50 300 400 2 1000 50 3000 4000 Setting value for an entire row >>> df.iloc[0] = 10 >>> df a b c d 0 10 10 10 10 1 100 50 300 400 2 1000 50 3000 4000 Set value for an entire column >>> df.iloc[:, 2] = 30 >>> df a b c d 0 10 10 30 10 1 100 50 30 400 2 1000 50 30 4000 Set value for an entire list of columns >>> df.iloc[:, [2, 3]] = 100 >>> df a b c d 0 10 10 100 100 1 100 50 100 100 2 1000 50 100 100 Set value with Series >>> df.iloc[:, 3] = df.iloc[:, 3] * 2 >>> df a b c d 0 10 10 100 200 1 100 50 100 200 2 1000 50 100 200 """ @staticmethod def _NotImplemented(description: str) -> SparkPandasNotImplementedError: return SparkPandasNotImplementedError( description=description, pandas_function=".iloc[..., ...]", spark_target_function="select, where", ) @lazy_property def _internal(self) -> "InternalFrame": # Use resolved_copy to fix the natural order. 
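        # resolved_copy pins the current computation so that the distributed
        # sequence column attached below (a 0-based row position) is derived
        # from a stable frame instead of a lazily re-evaluated plan.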
internal = super()._internal.resolved_copy sdf = InternalFrame.attach_distributed_sequence_column( internal.spark_frame, column_name=self._sequence_col ) return internal.with_new_sdf(spark_frame=sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)) @lazy_property def _sequence_col(self) -> str: # Use resolved_copy to fix the natural order. internal = super()._internal.resolved_copy return verify_temp_column_name(internal.spark_frame, "__distributed_sequence_column__") def _select_rows_by_series( self, rows_sel: "Series" ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: raise iLocIndexer._NotImplemented( ".iloc requires numeric slice, conditional " "boolean Index or a sequence of positions as int, " "got {}".format(type(rows_sel)) ) def _select_rows_by_spark_column( self, rows_sel: Column ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: raise iLocIndexer._NotImplemented( ".iloc requires numeric slice, conditional " "boolean Index or a sequence of positions as int, " "got {}".format(type(rows_sel)) ) def _select_rows_by_slice( self, rows_sel: slice ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: def verify_type(i: int) -> None: if not isinstance(i, int): raise TypeError( "cannot do slice indexing with these indexers [{}] of {}".format(i, type(i)) ) has_negative = False start = rows_sel.start if start is not None: verify_type(start) if start == 0: start = None elif start < 0: has_negative = True stop = rows_sel.stop if stop is not None: verify_type(stop) if stop < 0: has_negative = True step = rows_sel.step if step is not None: verify_type(step) if step == 0: raise ValueError("slice step cannot be zero") else: step = 1 if start is None and step == 1: return None, stop, None sdf = self._internal.spark_frame sequence_scol = sdf[self._sequence_col] if has_negative or (step < 0 and start is None): cnt = sdf.count() cond = [] if start is not None: if start < 0: start = start + cnt if step >= 0: cond.append(sequence_scol >= SF.lit(start).cast(LongType())) else: cond.append(sequence_scol <= SF.lit(start).cast(LongType())) if stop is not None: if stop < 0: stop = stop + cnt if step >= 0: cond.append(sequence_scol < SF.lit(stop).cast(LongType())) else: cond.append(sequence_scol > SF.lit(stop).cast(LongType())) if step != 1: if step > 0: start = start or 0 else: start = start or (cnt - 1) cond.append(((sequence_scol - start) % SF.lit(step).cast(LongType())) == SF.lit(0)) return reduce(lambda x, y: x & y, cond), None, None def _select_rows_by_iterable( self, rows_sel: Iterable ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: sdf = self._internal.spark_frame if any(isinstance(key, (int, np.int64, np.int32)) and key < 0 for key in rows_sel): offset = sdf.count() else: offset = 0 new_rows_sel = [] for key in list(rows_sel): if not isinstance(key, (int, np.int64, np.int32)): raise TypeError( "cannot do positional indexing with these indexers [{}] of {}".format( key, type(key) ) ) if key < 0: key = key + offset new_rows_sel.append(key) if len(new_rows_sel) != len(set(new_rows_sel)): raise NotImplementedError( "Duplicated row selection is not currently supported; " "however, normalised index was [%s]" % new_rows_sel ) sequence_scol = sdf[self._sequence_col] cond = [] for key in new_rows_sel: cond.append(sequence_scol == SF.lit(int(key)).cast(LongType())) if len(cond) == 0: cond = [SF.lit(False)] return reduce(lambda x, y: x | y, cond), None, None def _select_rows_else( self, rows_sel: Any ) -> Tuple[Optional[Column], Optional[int], Optional[int]]: if isinstance(rows_sel, int): 
sdf = self._internal.spark_frame return (sdf[self._sequence_col] == rows_sel), None, 0 elif isinstance(rows_sel, tuple): raise SparkPandasIndexingError("Too many indexers") else: raise iLocIndexer._NotImplemented( ".iloc requires numeric slice, conditional " "boolean Index or a sequence of positions as int, " "got {}".format(type(rows_sel)) ) def _select_cols_by_series( self, cols_sel: "Series", missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: raise ValueError( "Location based indexing can only have [integer, integer slice, " "listlike of integers, boolean array] types, got {}".format(cols_sel) ) def _select_cols_by_spark_column( self, cols_sel: Column, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: raise ValueError( "Location based indexing can only have [integer, integer slice, " "listlike of integers, boolean array] types, got {}".format(cols_sel) ) def _select_cols_by_slice( self, cols_sel: slice, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: if all( s is None or isinstance(s, int) for s in (cols_sel.start, cols_sel.stop, cols_sel.step) ): column_labels = self._internal.column_labels[cols_sel] data_spark_columns = self._internal.data_spark_columns[cols_sel] data_fields = self._internal.data_fields[cols_sel] return column_labels, data_spark_columns, data_fields, False, None else: not_none = ( cols_sel.start if cols_sel.start is not None else cols_sel.stop if cols_sel.stop is not None else cols_sel.step ) raise TypeError( "cannot do slice indexing with these indexers {} of {}".format( not_none, type(not_none) ) ) def _select_cols_by_iterable( self, cols_sel: Iterable, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: if all(isinstance(s, bool) for s in cols_sel): cols_sel = [i for i, s in enumerate(cols_sel) if s] if all(isinstance(s, int) for s in cols_sel): column_labels = [self._internal.column_labels[s] for s in cols_sel] data_spark_columns = [self._internal.data_spark_columns[s] for s in cols_sel] data_fields = [self._internal.data_fields[s] for s in cols_sel] return column_labels, data_spark_columns, data_fields, False, None else: raise TypeError("cannot perform reduce with flexible type") def _select_cols_else( self, cols_sel: Any, missing_keys: Optional[List[Name]] ) -> Tuple[ List[Label], Optional[List[Column]], Optional[List[InternalField]], bool, Optional[Name], ]: if isinstance(cols_sel, int): if cols_sel > len(self._internal.column_labels): raise KeyError(cols_sel) column_labels = [self._internal.column_labels[cols_sel]] data_spark_columns = [self._internal.data_spark_columns[cols_sel]] data_fields = [self._internal.data_fields[cols_sel]] return column_labels, data_spark_columns, data_fields, True, None else: raise ValueError( "Location based indexing can only have [integer, integer slice, " "listlike of integers, boolean array] types, got {}".format(cols_sel) ) def __setitem__(self, key: Any, value: Any) -> None: if is_list_like(value) and not isinstance(value, Column): iloc_item = self[key] if not is_list_like(key) or not is_list_like(iloc_item): raise ValueError("setting an array element with a sequence.") else: shape_iloc_item = iloc_item.shape len_iloc_item = shape_iloc_item[0] len_value = len(value) if 
len_iloc_item != len_value: if self._is_series: raise ValueError( "cannot set using a list-like indexer with a different length than " "the value" ) else: raise ValueError( "shape mismatch: value array of shape ({},) could not be broadcast " "to indexing result of shape {}".format(len_value, shape_iloc_item) ) super().__setitem__(key, value) # Update again with resolved_copy to drop extra columns. self._psdf._update_internal_frame( self._psdf._internal.resolved_copy, requires_same_anchor=False ) # Clean up implicitly cached properties to be able to reuse the indexer. del self._internal del self._sequence_col def _test() -> None: import os import doctest import sys from pyspark.sql import SparkSession import pyspark.pandas.indexing os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.pandas.indexing.__dict__.copy() globs["ps"] = pyspark.pandas spark = ( SparkSession.builder.master("local[4]") .appName("pyspark.pandas.indexing tests") .getOrCreate() ) (failure_count, test_count) = doctest.testmod( pyspark.pandas.indexing, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE, ) spark.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
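# A minimal usage sketch of the indexers defined above (illustrative only,
# not part of the module; assumes an active Spark session behind `ps`):
#
#   psdf = ps.DataFrame({"a": [1, 2, 3]}, index=["x", "y", "z"])
#   psdf.loc["x":"y", "a"]   # label-based slice; both endpoints inclusive
#   psdf.iloc[0:2, [0]]      # purely position-based selection
#   psdf.at["x", "a"]        # single value by label pair
#   psdf.iat[0, 0]           # single value by position pair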
chuckchen/spark
python/pyspark/pandas/indexing.py
Python
apache-2.0
67583
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.debug.lib import check_numerics_callback from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import layers from tensorflow.python.keras import models from tensorflow.python.keras import optimizer_v2 from tensorflow.python.keras.applications import mobilenet_v2 from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest class LimitStringLengthTest(test_util.TensorFlowTestCase): def testLimitStringLengthWithExplicitLimit(self): self.assertEqual( check_numerics_callback.limit_string_length("", max_len=2), "") self.assertEqual( check_numerics_callback.limit_string_length("e", max_len=2), "e") self.assertEqual( check_numerics_callback.limit_string_length("de", max_len=2), "de") self.assertEqual( check_numerics_callback.limit_string_length("abcde", max_len=2), "...de") def testLimitStringLengthWithNoLimit(self): self.assertEqual(check_numerics_callback.limit_string_length( "A" * 100 + "B", max_len=None), "A" * 100 + "B") self.assertEqual( check_numerics_callback.limit_string_length("", max_len=None), "") def testLimitStringLengthWithDefaultLimit(self): self.assertEqual( check_numerics_callback.limit_string_length("A" * 50 + "B"), "..." + "A" * 49 + "B") class CheckNumericsCallbackTest(test_util.TensorFlowTestCase): def tearDown(self): check_numerics_callback.disable_check_numerics() super(CheckNumericsCallbackTest, self).tearDown() def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func): caught = None try: func() except errors.InvalidArgumentError as error: caught = error self.assertTrue(caught, "Failed to catch expected InvalidArgumentError") return caught.message def testCatchEagerOpFloat32Inf(self): """Test catching Infinity in eager op execution: float32.""" check_numerics_callback.enable_check_numerics() x = constant_op.constant([2.0, 3.0]) y = constant_op.constant([1.0, 0.0]) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: x / y) # Check the content of the error message. 
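    # Dividing 3.0 by 0.0 yields a single +Inf, so the report should name
    # the eager "RealDiv" op, its float32 dtype and (2,) shape, count one
    # +Inf element, and echo both input tensors.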
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertIn("shape: (2,)\n", message) self.assertIn("# of +Inf elements: 1\n", message) self.assertIn("0: %s" % x, message) self.assertIn("1: %s" % y, message) def testEnableCheckNumericsIsIdempotent(self): """Two calls to enable_check_numerics() have same effect as one call.""" check_numerics_callback.enable_check_numerics() check_numerics_callback.enable_check_numerics() x = constant_op.constant([2.0, 3.0]) y = constant_op.constant([1.0, 0.0]) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: x / y) # Check the content of the error message. self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertIn("shape: (2,)\n", message) self.assertIn("# of +Inf elements: 1\n", message) self.assertIn("0: %s" % x, message) self.assertIn("1: %s" % y, message) def testCallingDisableCheckNumericsWithoutEnablingFirstIsTolerated(self): check_numerics_callback.disable_check_numerics() def testCatchEagerOpFloat16NaN(self): """Test catching Infinity in eager op execution: float16.""" check_numerics_callback.enable_check_numerics() def log1p(x): y = 1.0 + x return math_ops.log(y) x = constant_op.constant([[-1.0]], dtype=dtypes.float16) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: log1p(x)) # Check the content of the error message. self.assertTrue(re.search(r"eagerly-executing op.*\"Log\"", message)) self.assertTrue(re.search(r"dtype.*float16", message)) self.assertIn("shape: (1, 1)\n", message) self.assertIn("# of -Inf elements: 1\n", message) self.assertTrue(re.search(r"Input tensor.*0\.", message)) def testNoCatchEagerOpExecution(self): """Test running multiple steps of eager execution without Inf/NaN.""" check_numerics_callback.enable_check_numerics() x = constant_op.constant([2.0, 3.0]) y = constant_op.constant([1.0, 0.0]) self.assertAllClose((x + y) * (x - y), [3.0, 9.0]) @test_util.run_in_graph_and_eager_modes def testCatchFunctionOpInfFloat64(self): """Test catching infinites generated in a FuncGraph.""" check_numerics_callback.enable_check_numerics() @def_function.function def divide_sum_with_diff(x, y): w1 = x + y w2 = x - y u = w1 / w2 return u * 2.0 x = constant_op.constant(2.0, dtype=dtypes.float64) y = constant_op.constant(2.0, dtype=dtypes.float64) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: self.evaluate(divide_sum_with_diff(x, y))) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message)) self.assertTrue(re.search(r"dtype.*float64", message)) self.assertIn("shape: ()\n", message) self.assertIn("Input tensors (2):", message) # Check that the correct input ops are printed. self.assertTrue(re.search(r"0:.*Tensor.*add:0", message)) self.assertTrue(re.search(r"1:.*Tensor.*sub:0", message)) # Check that the correct line for op creation is printed. 
self.assertTrue(re.search(r"Stack trace of op's creation", message)) self.assertIn("u = w1 / w2", message) @test_util.run_in_graph_and_eager_modes @test_util.disable_xla( "TODO(b/141100809): XLA has no way to assert inside of a kernel.") def testControlFlowGraphWithNaNBFloat16(self): """Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph.""" check_numerics_callback.enable_check_numerics() @def_function.function def my_conditional(x): if math_ops.less(math_ops.reduce_sum(x), 0.0): return math_ops.log(x) else: return math_ops.log(-x) x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: self.evaluate(my_conditional(x))) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"Log\"", message)) self.assertTrue(re.search(r"dtype.*bfloat16", message)) self.assertIn("shape: (3,)\n", message) # Check that the correct input op is printed. self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg", message)) # Check that the correct line for op creation is printed. self.assertTrue(re.search(r"Stack trace of op's creation", message)) self.assertIn("return math_ops.log(-x)", message) if context.executing_eagerly(): # The code path for raising error is slightly different under graph mode. self.assertTrue(message.endswith("\n")) @test_util.run_in_graph_and_eager_modes @test_util.disable_xla( "There is a small inconsistency in the step at which overflow happens: " "128 (without XLA) and 127 (with XLA).") def testOverflowInTfFunction(self): """Test catching Infinity caused by overflow in a tf.function with while.""" check_numerics_callback.enable_check_numerics() @def_function.function def accumulation_function(counter, lim, accum): while math_ops.less(counter, lim): accum.assign(accum * 2.0) counter.assign_add(1) counter = variables.Variable(0, dtype=dtypes.int32) # Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the # 1000-step limit is sufficient. lim = constant_op.constant(1000, dtype=dtypes.int32) accum = variables.Variable(1.0) if not context.executing_eagerly(): self.evaluate([counter.initializer, accum.initializer]) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: self.evaluate(accumulation_function(counter, lim, accum))) self.assertAllClose(self.evaluate(counter), 128) # Check the content of the error message. # The overflow to +Infinity happens during the `* 2.0` operation. self.assertTrue(re.search(r"graph op.*\"Mul\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertIn("shape: ()\n", message) # Check that the correct input op is printed. self.assertIn("Input tensors (2):", message) # Check that the correct input ops are printed. self.assertTrue(re.search(r"0:.*Tensor.*ReadVariableOp:0", message)) self.assertTrue(re.search(r"1:.*Tensor.*mul/y:0", message)) # Check that the correct line for op creation is printed. 
self.assertTrue(re.search(r"Stack trace of op's creation", message)) self.assertIn("accum.assign(accum * 2.0)", message) @test_util.run_in_graph_and_eager_modes def testKerasModelHealthyPredictAndFitCalls(self): """Test a simple healthy keras model runs fine under the callback.""" check_numerics_callback.enable_check_numerics() model = models.Sequential() model.add(layers.Dense( units=100, input_shape=(5,), use_bias=False, activation="relu", kernel_initializer="ones")) model.add(layers.BatchNormalization()) model.add(layers.Dropout(0.5)) model.add(layers.Dense( units=1, activation="linear", kernel_initializer="ones")) model.compile( loss="mse", optimizer=optimizer_v2.gradient_descent.SGD(1e-3)) batch_size = 16 xs = np.zeros([batch_size, 5]) ys = np.ones([batch_size, 1]) outputs = model.predict(xs) self.assertEqual(outputs.shape, (batch_size, 1)) epochs = 100 history = model.fit(xs, ys, epochs=epochs, verbose=0) self.assertEqual(len(history.history["loss"]), epochs) @test_util.run_in_graph_and_eager_modes def testKerasModelWithRNNHealthyPredictAndFitCalls(self): """Test a simple healthy keras recurrent model works under the callback.""" check_numerics_callback.enable_check_numerics() model = models.Sequential() model.add(layers.LSTM(1, input_shape=(2, 4))) model.compile(loss="mse", optimizer="rmsprop") xs = np.zeros([8, 2, 4], dtype=np.float32) ys = np.zeros([8, 1], dtype=np.float32) model.predict(xs) epochs = 3 history = model.fit(xs, ys, epochs=epochs, verbose=0) self.assertEqual(len(history.history["loss"]), epochs) @test_util.run_in_graph_and_eager_modes def testKerasModelUnhealthyPredictAndFitCallsWithLargeLearningRate(self): """Test keras model training crashes with Infinity is caught by callback.""" check_numerics_callback.enable_check_numerics() model = models.Sequential() # Use weight initializers for deterministic behavior during test. model.add(layers.Dense( units=100, input_shape=(5,), activation="relu", kernel_initializer="ones")) model.add(layers.Dense( units=1, activation="linear", kernel_initializer="ones")) lr = 1e3 # Intentionally huge learning rate. model.compile(loss="mse", optimizer=optimizer_v2.gradient_descent.SGD(lr)) batch_size = 16 xs = np.zeros([batch_size, 5]) ys = np.ones([batch_size, 1]) outputs = model.predict(xs) self.assertEqual(outputs.shape, (batch_size, 1)) epochs = 100 message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: model.fit(xs, ys, epochs=epochs, verbose=0)) # Check the content of the error message. # Let's not hardcode the op name for future-proof. self.assertTrue(re.search(r"graph op.*\".*\"", message)) self.assertTrue(re.search(r"dtype:.*float32", message)) self.assertTrue(re.search(r"shape:.*\(.*\)", message)) # Check that the correct input op is printed. self.assertTrue(re.search(r"Input tensor.*", message)) # Check that the correct line for op creation is printed. self.assertTrue(re.search(r"Stack trace of op's creation", message)) # The stacks are different between when eager execution is enabled and # when it's not (i.e., v1 graph). TODO(cais): Investigate if we can improve # this. 
if context.executing_eagerly(): self.assertIn("lambda: model.fit(xs, ys,", message) else: self.assertIn("model.compile(", message) @test_util.run_in_graph_and_eager_modes def testInfInCustomKerasLayerWithTfFunctionPredictCall(self): """Test catching Infinity in a custom layer, w/ tf.function.""" check_numerics_callback.enable_check_numerics() class DivByXLayer(layers.Layer): @def_function.function def call(self, x): """The computation performed by the for-test custom layer. Generates Infinity by intention. Args: x: Input tensor of scalar shape. Returns: A scalar tensor. """ one_over_x = 1.0 / x return one_over_x model = models.Sequential() model.add(DivByXLayer(input_shape=[5])) # TODO(b/140245224): Currently the model must be compiled prior to # predict() being called(). Or keras will fall back to V1 behavior. # Remove this after the bug is fixed. model.compile(loss="mse", optimizer="sgd") xs = np.ones([1, 5]) # Calling the model with non-zero inputs should be fine. self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]]) xs = np.zeros([1, 5]) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: model.predict(xs)) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertTrue(re.search(r"shape: \(.*, 5\)", message)) # # Check that the correct input op is printed. self.assertIn("Input tensors (2):", message) # # # Check that the correct line for op creation is printed. self.assertTrue(re.search(r"Stack trace of op's creation", message)) self.assertIn("one_over_x = 1.0 / x", message) @test_util.run_in_graph_and_eager_modes def testInfInCustomKerasLayerWithoutTfFuntionPredictCall(self): """Test catching Infinity in a custom layer, w/o tf.function.""" check_numerics_callback.enable_check_numerics() class DivByXLayer(layers.Layer): # Not using the tf.function decorator here. def call(self, x): """The computation performed by the for-test custom layer. Generates Infinity by intention. Args: x: Input tensor of scalar shape. Returns: A scalar tensor. """ one_over_x = 1.0 / x return one_over_x model = models.Sequential() model.add(DivByXLayer(input_shape=[5])) # TODO(b/140245224): Currently the model must be compiled prior to # predict() being called(). Or keras will fall back to V1 behavior. # Remove this after the bug is fixed. model.compile(loss="mse", optimizer="sgd") xs = np.ones([1, 5]) # Calling the model with non-zero inputs should be fine. self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]]) xs = np.zeros([1, 5]) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: model.predict(xs)) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertTrue(re.search(r"shape: \(.*, 5\)", message)) # Check that the correct input op is printed. self.assertIn("Input tensors (2):", message) # Check that the correct line for op creation is printed. 
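    # Even without the tf.function decorator, predict() still builds a
    # graph, so the creation trace should again point at the division
    # inside DivByXLayer.call.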
self.assertTrue(re.search(r"Stack trace of op's creation", message)) self.assertIn("one_over_x = 1.0 / x", message) @test_util.run_in_graph_and_eager_modes def testDatasetMapHealthyResults(self): check_numerics_callback.enable_check_numerics() tensor = constant_op.constant( [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]) def map_fn(x): return math_ops.log(math_ops.square(x) + 1) dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(2).map( map_fn) iterator = dataset_ops.make_one_shot_iterator(dataset) self.assertAllClose(self.evaluate(iterator.get_next()), np.log([1.25, 2])) self.assertAllClose(self.evaluate(iterator.get_next()), np.log([3.25, 5])) @test_util.run_in_graph_and_eager_modes def testCatchInfinityInDatasetMapFunction(self): """Test that callback catches NaN in a tf.dataset map function.""" check_numerics_callback.enable_check_numerics() def generate_nan(x): """Intetionally generates NaNs by taking log of negative number.""" casted_x = math_ops.cast(x, dtypes.float32) return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x dataset = dataset_ops.Dataset.range(10).map(generate_nan) iterator = dataset_ops.make_one_shot_iterator(dataset) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: self.evaluate(iterator.get_next())) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"Log\"", message)) self.assertTrue(re.search(r"dtype.*float32", message)) self.assertIn("shape: (2, 2)\n", message) self.assertTrue(re.search(r"Input tensor.*Tensor.*Log/x:0", message)) self.assertIn( "-> | return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x", message) @test_util.run_in_graph_and_eager_modes def testCustomGradietWithNaNWithTfFunction(self): """Test that callback catches NaN in a gradient function during backprop.""" check_numerics_callback.enable_check_numerics() @custom_gradient.custom_gradient def func_with_bad_grad(x): output = math_ops.sin(x) @def_function.function def grad(dy): # `dy` will come in as 1.0. Taking log of -1.0 leads to NaN. return math_ops.log(-dy) return output, grad x = constant_op.constant(-2.0, dtype=dtypes.float16) def f(x): return func_with_bad_grad(x) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: gradient_checker_v2.compute_gradient(f, [x])) # Check the content of the error message. self.assertTrue(re.search(r"graph op.*\"Log\"", message)) self.assertTrue(re.search(r"dtype.*float16", message)) if context.executing_eagerly(): self.assertIn("shape: ()\n", message) self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg:0", message)) self.assertIn("-> | return math_ops.log(-dy)", message) @test_util.run_in_graph_and_eager_modes def testMobileNetV2Fit(self): """Test training Keras MobileNetV2 application works w/ check numerics.""" check_numerics_callback.enable_check_numerics() model = mobilenet_v2.MobileNetV2(alpha=0.1, weights=None) xs = np.zeros([2] + list(model.input_shape[1:])) ys = np.zeros([2] + list(model.output_shape[1:])) model.compile(optimizer="sgd", loss="categorical_crossentropy") epochs = 1 history = model.fit(xs, ys, epochs=epochs, verbose=0) self.assertEqual(len(history.history["loss"]), epochs) @test_util.run_in_graph_and_eager_modes def testNestedFunctionGradientCall(self): """Catching inf in the inner nested tf.function during backprop.""" check_numerics_callback.enable_check_numerics() x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32) @def_function.function def asinp1(x): # asin()'s gradient overflows at the value close to 1.0. 
return math_ops.asin(x) + 1.0 @def_function.function def loss(x): return math_ops.square(asinp1(x)) with backprop.GradientTape() as tape: tape.watch(x) y = loss(x) message = self._assertRaisesInvalidArgumentErrorAndGetMessage( lambda: self.evaluate(tape.gradient(y, x))) # Check the content of the error message. # Assume the op Reciprocal or Xdivy is used in the gradient function for # asin(). self.assertTrue((re.search(r"graph op.*\"Reciprocal\"", message) or re.search(r"graph op.*\"Xdivy\"", message))) self.assertTrue(re.search(r"dtype.*float32", message)) @test_util.run_in_graph_and_eager_modes def testExpectedNaNOpOutputs(self): """Test calling operations with benign NaN output.""" check_numerics_callback.enable_check_numerics() # Empty input tensor x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1]) scale = constant_op.constant([1], dtype=dtypes.float32) offset = constant_op.constant([1], dtype=dtypes.float32) # Calling fused_batch_norm with an empty input should output a NaN in the # latter four outputs without triggering the check_numerics callback batch_norm_res = gen_nn_ops._fused_batch_norm( x=x, scale=scale, offset=offset, mean=[], variance=[]) _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res) self.assertTrue(np.isnan(batch_mean.squeeze())) self.assertTrue(np.isnan(batch_variance.squeeze())) # TODO(cais): Benchmark the slowdown due to callbacks and inserted nodes. if __name__ == "__main__": ops.enable_eager_execution() googletest.main()
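# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the tests above
# exercise check_numerics_callback through Keras, tf.data, and custom
# gradients. A minimal standalone reproduction of the same technique,
# assuming a TF 2.x install where the debug module is importable under the
# path used by this test file, could look like this:
#
#   import tensorflow as tf
#   from tensorflow.python.debug.lib import check_numerics_callback
#
#   check_numerics_callback.enable_check_numerics()
#
#   @tf.function
#   def divide(x, y):
#     return x / y  # Emits Inf when y == 0.
#
#   try:
#     divide(tf.constant(1.0), tf.constant(0.0))
#   except tf.errors.InvalidArgumentError as e:
#     # The message includes the op name ("RealDiv"), dtype, shape, and a
#     # stack trace of the op's creation, as asserted in the tests above.
#     print(e.message)
# ---------------------------------------------------------------------------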
ppwwyyxx/tensorflow
tensorflow/python/debug/lib/check_numerics_callback_test.py
Python
apache-2.0
22,712
# -*- coding: utf-8 -*- # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility module for translating XML API objects to/from JSON objects.""" from __future__ import absolute_import import datetime import json import re import textwrap import xml.etree.ElementTree from apitools.base.py import encoding import boto from boto.gs.acl import ACL from boto.gs.acl import ALL_AUTHENTICATED_USERS from boto.gs.acl import ALL_USERS from boto.gs.acl import Entries from boto.gs.acl import Entry from boto.gs.acl import GROUP_BY_DOMAIN from boto.gs.acl import GROUP_BY_EMAIL from boto.gs.acl import GROUP_BY_ID from boto.gs.acl import USER_BY_EMAIL from boto.gs.acl import USER_BY_ID from gslib.cloud_api import ArgumentException from gslib.cloud_api import BucketNotFoundException from gslib.cloud_api import NotFoundException from gslib.cloud_api import Preconditions from gslib.exception import CommandException from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages # In Python 2.6, ElementTree raises ExpatError instead of ParseError. # pylint: disable=g-import-not-at-top try: from xml.etree.ElementTree import ParseError as XmlParseError except ImportError: from xml.parsers.expat import ExpatError as XmlParseError CACHE_CONTROL_REGEX = re.compile(r'^cache-control', re.I) CONTENT_DISPOSITION_REGEX = re.compile(r'^content-disposition', re.I) CONTENT_ENCODING_REGEX = re.compile(r'^content-encoding', re.I) CONTENT_LANGUAGE_REGEX = re.compile(r'^content-language', re.I) CONTENT_MD5_REGEX = re.compile(r'^content-md5', re.I) CONTENT_TYPE_REGEX = re.compile(r'^content-type', re.I) GOOG_API_VERSION_REGEX = re.compile(r'^x-goog-api-version', re.I) GOOG_GENERATION_MATCH_REGEX = re.compile(r'^x-goog-if-generation-match', re.I) GOOG_METAGENERATION_MATCH_REGEX = re.compile( r'^x-goog-if-metageneration-match', re.I) CUSTOM_GOOG_METADATA_REGEX = re.compile(r'^x-goog-meta-(?P<header_key>.*)', re.I) CUSTOM_AMZ_METADATA_REGEX = re.compile(r'^x-amz-meta-(?P<header_key>.*)', re.I) CUSTOM_AMZ_HEADER_REGEX = re.compile(r'^x-amz-(?P<header_key>.*)', re.I) # gsutil-specific GUIDs for marking special metadata for S3 compatibility. S3_ACL_MARKER_GUID = '3b89a6b5-b55a-4900-8c44-0b0a2f5eab43-s3-AclMarker' S3_DELETE_MARKER_GUID = 'eadeeee8-fa8c-49bb-8a7d-0362215932d8-s3-DeleteMarker' S3_MARKER_GUIDS = [S3_ACL_MARKER_GUID, S3_DELETE_MARKER_GUID] # This distinguishes S3 custom headers from S3 metadata on objects. S3_HEADER_PREFIX = 'custom-amz-header' DEFAULT_CONTENT_TYPE = 'application/octet-stream' # Because CORS is just a list in apitools, we need special handling or blank # CORS lists will get sent with other configuration commands such as lifecycle, # which would cause CORS configuration to be unintentionally removed. # Protorpc defaults list values to an empty list, and won't allow us to set the # value to None like other configuration fields, so there is no way to # distinguish the default value from when we actually want to remove the CORS # configuration. 
# To work around this, we create a dummy CORS entry that signifies that we
# should nullify the CORS configuration.
# A value of [] means don't modify the CORS configuration.
# A value of REMOVE_CORS_CONFIG means remove the CORS configuration.
REMOVE_CORS_CONFIG = [apitools_messages.Bucket.CorsValueListEntry(
    maxAgeSeconds=-1, method=['REMOVE_CORS_CONFIG'])]

# Similar to CORS above, we need a sentinel value allowing us to specify
# when a default object ACL should be private (containing no entries).
# A defaultObjectAcl value of [] means don't modify the default object ACL.
# A value of [PRIVATE_DEFAULT_OBJ_ACL] means create an empty/private default
# object ACL.
PRIVATE_DEFAULT_OBJ_ACL = apitools_messages.ObjectAccessControl(
    id='PRIVATE_DEFAULT_OBJ_ACL')


def ObjectMetadataFromHeaders(headers):
  """Creates object metadata according to the provided headers.

  gsutil -h allows specifying various headers (originally intended to be
  passed to boto in gsutil v3). For the JSON API to be compatible with this
  option, we need to parse these headers into gsutil_api Object fields.

  Args:
    headers: Dict of headers passed via gsutil -h

  Raises:
    ArgumentException if an invalid header is encountered.

  Returns:
    apitools Object with relevant fields populated from headers.
  """
  obj_metadata = apitools_messages.Object()
  for header, value in headers.items():
    if CACHE_CONTROL_REGEX.match(header):
      obj_metadata.cacheControl = value.strip()
    elif CONTENT_DISPOSITION_REGEX.match(header):
      obj_metadata.contentDisposition = value.strip()
    elif CONTENT_ENCODING_REGEX.match(header):
      obj_metadata.contentEncoding = value.strip()
    elif CONTENT_MD5_REGEX.match(header):
      obj_metadata.md5Hash = value.strip()
    elif CONTENT_LANGUAGE_REGEX.match(header):
      obj_metadata.contentLanguage = value.strip()
    elif CONTENT_TYPE_REGEX.match(header):
      if not value:
        obj_metadata.contentType = DEFAULT_CONTENT_TYPE
      else:
        obj_metadata.contentType = value.strip()
    elif GOOG_API_VERSION_REGEX.match(header):
      # API version is only relevant for XML, ignore and rely on the XML API
      # to add the appropriate version.
      continue
    elif GOOG_GENERATION_MATCH_REGEX.match(header):
      # Preconditions are handled elsewhere, but allow these headers through.
      continue
    elif GOOG_METAGENERATION_MATCH_REGEX.match(header):
      # Preconditions are handled elsewhere, but allow these headers through.
      continue
    else:
      custom_goog_metadata_match = CUSTOM_GOOG_METADATA_REGEX.match(header)
      custom_amz_metadata_match = CUSTOM_AMZ_METADATA_REGEX.match(header)
      custom_amz_header_match = CUSTOM_AMZ_HEADER_REGEX.match(header)
      header_key = None
      if custom_goog_metadata_match:
        header_key = custom_goog_metadata_match.group('header_key')
      elif custom_amz_metadata_match:
        header_key = custom_amz_metadata_match.group('header_key')
      elif custom_amz_header_match:
        # If we got here we are guaranteed by the prior statement that this is
        # not an x-amz-meta- header.
        header_key = (S3_HEADER_PREFIX +
                      custom_amz_header_match.group('header_key'))
      if header_key:
        if header_key.lower() == 'x-goog-content-language':
          # Work around content-language being inserted into custom metadata.
          continue
        if not obj_metadata.metadata:
          obj_metadata.metadata = apitools_messages.Object.MetadataValue()
        if not obj_metadata.metadata.additionalProperties:
          obj_metadata.metadata.additionalProperties = []
        obj_metadata.metadata.additionalProperties.append(
            apitools_messages.Object.MetadataValue.AdditionalProperty(
                key=header_key, value=value))
      else:
        raise ArgumentException(
            'Invalid header specified: %s:%s' % (header, value))
  return obj_metadata


def HeadersFromObjectMetadata(dst_obj_metadata, provider):
  """Creates a header dictionary based on existing object metadata.

  Args:
    dst_obj_metadata: Object metadata to create the headers from.
    provider: Provider string ('gs' or 's3').

  Returns:
    Headers dictionary.
  """
  headers = {}
  if not dst_obj_metadata:
    return headers
  # Metadata values of '' mean suppress/remove this header.
  if dst_obj_metadata.cacheControl is not None:
    if not dst_obj_metadata.cacheControl:
      headers['cache-control'] = None
    else:
      headers['cache-control'] = dst_obj_metadata.cacheControl.strip()
  if dst_obj_metadata.contentDisposition is not None:
    if not dst_obj_metadata.contentDisposition:
      headers['content-disposition'] = None
    else:
      headers['content-disposition'] = (
          dst_obj_metadata.contentDisposition.strip())
  if dst_obj_metadata.contentEncoding is not None:
    if not dst_obj_metadata.contentEncoding:
      headers['content-encoding'] = None
    else:
      headers['content-encoding'] = dst_obj_metadata.contentEncoding.strip()
  if dst_obj_metadata.contentLanguage is not None:
    if not dst_obj_metadata.contentLanguage:
      headers['content-language'] = None
    else:
      headers['content-language'] = dst_obj_metadata.contentLanguage.strip()
  if dst_obj_metadata.md5Hash is not None:
    if not dst_obj_metadata.md5Hash:
      headers['Content-MD5'] = None
    else:
      headers['Content-MD5'] = dst_obj_metadata.md5Hash.strip()
  if dst_obj_metadata.contentType is not None:
    if not dst_obj_metadata.contentType:
      headers['content-type'] = None
    else:
      headers['content-type'] = dst_obj_metadata.contentType.strip()
  if dst_obj_metadata.storageClass:
    header_name = 'storage-class'
    if provider == 'gs':
      header_name = 'x-goog-' + header_name
    elif provider == 's3':
      header_name = 'x-amz-' + header_name
    else:
      raise ArgumentException('Invalid provider specified: %s' % provider)
    headers[header_name] = dst_obj_metadata.storageClass.strip()
  if (dst_obj_metadata.metadata and
      dst_obj_metadata.metadata.additionalProperties):
    for additional_property in dst_obj_metadata.metadata.additionalProperties:
      # Work around content-language being inserted into custom metadata by
      # the XML API.
      if additional_property.key == 'content-language':
        continue
      # Don't translate special metadata markers.
      if additional_property.key in S3_MARKER_GUIDS:
        continue
      if provider == 'gs':
        header_name = 'x-goog-meta-' + additional_property.key
      elif provider == 's3':
        if additional_property.key.startswith(S3_HEADER_PREFIX):
          header_name = ('x-amz-' +
                         additional_property.key[len(S3_HEADER_PREFIX):])
        else:
          header_name = 'x-amz-meta-' + additional_property.key
      else:
        raise ArgumentException('Invalid provider specified: %s' % provider)
      if (additional_property.value is not None and
          not additional_property.value):
        headers[header_name] = None
      else:
        headers[header_name] = additional_property.value
  return headers


def CopyObjectMetadata(src_obj_metadata, dst_obj_metadata, override=False):
  """Copies metadata from src_obj_metadata to dst_obj_metadata.

  Args:
    src_obj_metadata: Metadata from source object.
    dst_obj_metadata: Initialized metadata for destination object.
    override: If true, will overwrite metadata in destination object.
        If false, only writes metadata for values that don't already exist.
  """
  if override or not dst_obj_metadata.cacheControl:
    dst_obj_metadata.cacheControl = src_obj_metadata.cacheControl
  if override or not dst_obj_metadata.contentDisposition:
    dst_obj_metadata.contentDisposition = src_obj_metadata.contentDisposition
  if override or not dst_obj_metadata.contentEncoding:
    dst_obj_metadata.contentEncoding = src_obj_metadata.contentEncoding
  if override or not dst_obj_metadata.contentLanguage:
    dst_obj_metadata.contentLanguage = src_obj_metadata.contentLanguage
  if override or not dst_obj_metadata.contentType:
    dst_obj_metadata.contentType = src_obj_metadata.contentType
  if override or not dst_obj_metadata.md5Hash:
    dst_obj_metadata.md5Hash = src_obj_metadata.md5Hash
  CopyCustomMetadata(src_obj_metadata, dst_obj_metadata, override=override)


def CopyCustomMetadata(src_obj_metadata, dst_obj_metadata, override=False):
  """Copies custom metadata from src_obj_metadata to dst_obj_metadata.

  Args:
    src_obj_metadata: Metadata from source object.
    dst_obj_metadata: Initialized metadata for destination object.
    override: If true, will overwrite metadata in destination object.
        If false, only writes metadata for values that don't already exist.
  """
  # TODO: Apitools should ideally treat metadata like a real dictionary instead
  # of a list of key/value pairs (with an O(N^2) lookup). In practice the
  # number of values is typically small enough not to matter.
  # Work around this by creating our own dictionary.
  if (src_obj_metadata.metadata and
      src_obj_metadata.metadata.additionalProperties):
    if not dst_obj_metadata.metadata:
      dst_obj_metadata.metadata = apitools_messages.Object.MetadataValue()
    if not dst_obj_metadata.metadata.additionalProperties:
      dst_obj_metadata.metadata.additionalProperties = []
    dst_metadata_dict = {}
    for dst_prop in dst_obj_metadata.metadata.additionalProperties:
      dst_metadata_dict[dst_prop.key] = dst_prop.value
    for src_prop in src_obj_metadata.metadata.additionalProperties:
      if src_prop.key in dst_metadata_dict:
        if override:
          # Metadata values of '' mean suppress/remove this header.
          if src_prop.value is not None and not src_prop.value:
            dst_metadata_dict[src_prop.key] = None
          else:
            dst_metadata_dict[src_prop.key] = src_prop.value
      elif src_prop.value != '':  # pylint: disable=g-explicit-bool-comparison
        # Don't propagate '' value since that means to remove the header.
        dst_metadata_dict[src_prop.key] = src_prop.value
    # Rewrite the list with our updated dict.
    dst_obj_metadata.metadata.additionalProperties = []
    for k, v in dst_metadata_dict.iteritems():
      dst_obj_metadata.metadata.additionalProperties.append(
          apitools_messages.Object.MetadataValue.AdditionalProperty(key=k,
                                                                    value=v))


def PreconditionsFromHeaders(headers):
  """Creates bucket or object preconditions according to the provided headers.

  Args:
    headers: Dict of headers passed via gsutil -h

  Returns:
    gsutil Cloud API Preconditions object fields populated from headers, or
    None if no precondition headers are present.
  """
  return_preconditions = Preconditions()
  try:
    for header, value in headers.items():
      if GOOG_GENERATION_MATCH_REGEX.match(header):
        return_preconditions.gen_match = long(value)
      if GOOG_METAGENERATION_MATCH_REGEX.match(header):
        return_preconditions.meta_gen_match = long(value)
  except ValueError, _:
    raise ArgumentException('Invalid precondition header specified. '
                            'x-goog-if-generation-match and '
                            'x-goog-if-metageneration-match must be specified '
                            'with a positive integer value.')
  return return_preconditions


def CreateNotFoundExceptionForObjectWrite(
    dst_provider, dst_bucket_name, src_provider=None,
    src_bucket_name=None, src_object_name=None, src_generation=None):
  """Creates a NotFoundException for an object upload or copy.

  This is necessary because 404s don't necessarily specify which resource
  does not exist.

  Args:
    dst_provider: String abbreviation of destination provider, e.g., 'gs'.
    dst_bucket_name: Destination bucket name for the write operation.
    src_provider: String abbreviation of source provider, e.g., 'gs', if any.
    src_bucket_name: Source bucket name, if any (for the copy case).
    src_object_name: Source object name, if any (for the copy case).
    src_generation: Source object generation, if any (for the copy case).

  Returns:
    NotFoundException with appropriate message.
  """
  dst_url_string = '%s://%s' % (dst_provider, dst_bucket_name)
  if src_bucket_name and src_object_name:
    src_url_string = '%s://%s/%s' % (src_provider, src_bucket_name,
                                     src_object_name)
    if src_generation:
      src_url_string += '#%s' % str(src_generation)
    return NotFoundException(
        'The source object %s or the destination bucket %s does not exist.' %
        (src_url_string, dst_url_string))

  return NotFoundException(
      'The destination bucket %s does not exist or the write to the '
      'destination must be restarted' % dst_url_string)


def CreateBucketNotFoundException(code, provider, bucket_name):
  return BucketNotFoundException('%s://%s bucket does not exist.' %
                                 (provider, bucket_name), bucket_name,
                                 status=code)


def CreateObjectNotFoundException(code, provider, bucket_name, object_name,
                                  generation=None):
  uri_string = '%s://%s/%s' % (provider, bucket_name, object_name)
  if generation:
    uri_string += '#%s' % str(generation)
  return NotFoundException('%s does not exist.' % uri_string, status=code)


def EncodeStringAsLong(string_to_convert):
  """Encodes an ASCII string as a python long.

  This is used for modeling S3 version_id's as apitools generation. Because
  python longs can be arbitrarily large, this works.

  Args:
    string_to_convert: ASCII string to convert to a long.

  Returns:
    Long that represents the input string.
  """
  return long(string_to_convert.encode('hex'), 16)


def _DecodeLongAsString(long_to_convert):
  """Decodes an encoded python long into an ASCII string.

  This is used for modeling S3 version_id's as apitools generation.

  Args:
    long_to_convert: long to convert to ASCII string. If this is already a
                     string, it is simply returned.

  Returns:
    String decoded from the input long.
  """
  if isinstance(long_to_convert, basestring):
    # Already converted.
    return long_to_convert
  return hex(long_to_convert)[2:-1].decode('hex')


def GenerationFromUrlAndString(url, generation):
  """Decodes a generation from a StorageURL and a generation string.

  This is used to represent gs and s3 versioning.

  Args:
    url: StorageUrl representing the object.
    generation: Long or string representing the object's generation or
                version.

  Returns:
    Valid generation string for use in URLs.
  """
  if url.scheme == 's3' and generation:
    return _DecodeLongAsString(generation)
  return generation


def CheckForXmlConfigurationAndRaise(config_type_string, json_txt):
  """Checks a JSON parse exception for provided XML configuration."""
  try:
    xml.etree.ElementTree.fromstring(str(json_txt))
    raise ArgumentException('\n'.join(textwrap.wrap(
        'XML {0} data provided; Google Cloud Storage {0} configuration '
        'now uses JSON format. '
        'To convert your {0}, set the desired XML '
        'ACL using \'gsutil {1} set ...\' with gsutil version 3.x. Then '
        'use \'gsutil {1} get ...\' with gsutil version 4 or greater to '
        'get the corresponding JSON {0}.'.format(config_type_string,
                                                 config_type_string.lower()))))
  except XmlParseError:
    pass
  raise ArgumentException('JSON %s data could not be loaded '
                          'from: %s' % (config_type_string, json_txt))


class LifecycleTranslation(object):
  """Functions for converting between various lifecycle formats.

  This class handles conversion to and from boto lifecycle objects, JSON
  text, and apitools Message objects.
  """

  @classmethod
  def BotoLifecycleFromMessage(cls, lifecycle_message):
    """Translates an apitools message to a boto lifecycle object."""
    boto_lifecycle = boto.gs.lifecycle.LifecycleConfig()
    if lifecycle_message:
      for rule_message in lifecycle_message.rule:
        boto_rule = boto.gs.lifecycle.Rule()
        if (rule_message.action and rule_message.action.type and
            rule_message.action.type.lower() == 'delete'):
          boto_rule.action = boto.gs.lifecycle.DELETE
        if rule_message.condition:
          if rule_message.condition.age:
            boto_rule.conditions[boto.gs.lifecycle.AGE] = (
                str(rule_message.condition.age))
          if rule_message.condition.createdBefore:
            boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE] = (
                str(rule_message.condition.createdBefore))
          if rule_message.condition.isLive:
            boto_rule.conditions[boto.gs.lifecycle.IS_LIVE] = (
                str(rule_message.condition.isLive))
          if rule_message.condition.numNewerVersions:
            boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS] = (
                str(rule_message.condition.numNewerVersions))
        boto_lifecycle.append(boto_rule)
    return boto_lifecycle

  @classmethod
  def BotoLifecycleToMessage(cls, boto_lifecycle):
    """Translates a boto lifecycle object to an apitools message."""
    lifecycle_message = None
    if boto_lifecycle:
      lifecycle_message = apitools_messages.Bucket.LifecycleValue()
      for boto_rule in boto_lifecycle:
        lifecycle_rule = (
            apitools_messages.Bucket.LifecycleValue.RuleValueListEntry())
        lifecycle_rule.condition = (apitools_messages.Bucket.LifecycleValue.
                                    RuleValueListEntry.ConditionValue())
        if boto_rule.action and boto_rule.action == boto.gs.lifecycle.DELETE:
          lifecycle_rule.action = (apitools_messages.Bucket.LifecycleValue.
                                   RuleValueListEntry.ActionValue(
                                       type='Delete'))
        if boto.gs.lifecycle.AGE in boto_rule.conditions:
          lifecycle_rule.condition.age = int(
              boto_rule.conditions[boto.gs.lifecycle.AGE])
        if boto.gs.lifecycle.CREATED_BEFORE in boto_rule.conditions:
          lifecycle_rule.condition.createdBefore = (
              LifecycleTranslation.TranslateBotoLifecycleTimestamp(
                  boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE]))
        if boto.gs.lifecycle.IS_LIVE in boto_rule.conditions:
          lifecycle_rule.condition.isLive = bool(
              boto_rule.conditions[boto.gs.lifecycle.IS_LIVE])
        if boto.gs.lifecycle.NUM_NEWER_VERSIONS in boto_rule.conditions:
          lifecycle_rule.condition.numNewerVersions = int(
              boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS])
        lifecycle_message.rule.append(lifecycle_rule)
    return lifecycle_message

  @classmethod
  def JsonLifecycleFromMessage(cls, lifecycle_message):
    """Translates an apitools message to lifecycle JSON."""
    return str(encoding.MessageToJson(lifecycle_message)) + '\n'

  @classmethod
  def JsonLifecycleToMessage(cls, json_txt):
    """Translates lifecycle JSON to an apitools message."""
    try:
      deserialized_lifecycle = json.loads(json_txt)
      # If lifecycle JSON is in the following format
      # {'lifecycle': {'rule': ... then strip out the 'lifecycle' key
      # and reduce it to the following format
      # {'rule': ...
      if 'lifecycle' in deserialized_lifecycle:
        deserialized_lifecycle = deserialized_lifecycle['lifecycle']
      lifecycle = encoding.DictToMessage(
          deserialized_lifecycle, apitools_messages.Bucket.LifecycleValue)
      return lifecycle
    except ValueError:
      CheckForXmlConfigurationAndRaise('lifecycle', json_txt)

  @classmethod
  def TranslateBotoLifecycleTimestamp(cls, lifecycle_datetime):
    """Parses the timestamp from the boto lifecycle into a datetime object."""
    return datetime.datetime.strptime(lifecycle_datetime, '%Y-%m-%d').date()


class CorsTranslation(object):
  """Functions for converting between various CORS formats.

  This class handles conversion to and from boto Cors objects, JSON text,
  and apitools Message objects.
  """

  @classmethod
  def BotoCorsFromMessage(cls, cors_message):
    """Translates an apitools message to a boto Cors object."""
    cors = boto.gs.cors.Cors()
    cors.cors = []
    for collection_message in cors_message:
      collection_elements = []
      if collection_message.maxAgeSeconds:
        collection_elements.append((boto.gs.cors.MAXAGESEC,
                                    str(collection_message.maxAgeSeconds)))
      if collection_message.method:
        method_elements = []
        for method in collection_message.method:
          method_elements.append((boto.gs.cors.METHOD, method))
        collection_elements.append((boto.gs.cors.METHODS, method_elements))
      if collection_message.origin:
        origin_elements = []
        for origin in collection_message.origin:
          origin_elements.append((boto.gs.cors.ORIGIN, origin))
        collection_elements.append((boto.gs.cors.ORIGINS, origin_elements))
      if collection_message.responseHeader:
        header_elements = []
        for header in collection_message.responseHeader:
          header_elements.append((boto.gs.cors.HEADER, header))
        collection_elements.append((boto.gs.cors.HEADERS, header_elements))
      cors.cors.append(collection_elements)
    return cors

  @classmethod
  def BotoCorsToMessage(cls, boto_cors):
    """Translates a boto Cors object to an apitools message."""
    message_cors = []
    if boto_cors.cors:
      for cors_collection in boto_cors.cors:
        if cors_collection:
          collection_message = apitools_messages.Bucket.CorsValueListEntry()
          for element_tuple in cors_collection:
            if element_tuple[0] == boto.gs.cors.MAXAGESEC:
              collection_message.maxAgeSeconds = int(element_tuple[1])
            if element_tuple[0] == boto.gs.cors.METHODS:
              for method_tuple in element_tuple[1]:
                collection_message.method.append(method_tuple[1])
            if element_tuple[0] == boto.gs.cors.ORIGINS:
              for origin_tuple in element_tuple[1]:
                collection_message.origin.append(origin_tuple[1])
            if element_tuple[0] == boto.gs.cors.HEADERS:
              for header_tuple in element_tuple[1]:
                collection_message.responseHeader.append(header_tuple[1])
          message_cors.append(collection_message)
    return message_cors

  @classmethod
  def JsonCorsToMessageEntries(cls, json_cors):
    """Translates CORS JSON to an apitools message.

    Args:
      json_cors: JSON string representing CORS configuration.

    Returns:
      List of apitools Bucket.CorsValueListEntry. An empty list represents
      no CORS configuration.
    """
    try:
      deserialized_cors = json.loads(json_cors)
      cors = []
      for cors_entry in deserialized_cors:
        cors.append(encoding.DictToMessage(
            cors_entry, apitools_messages.Bucket.CorsValueListEntry))
      return cors
    except ValueError:
      CheckForXmlConfigurationAndRaise('CORS', json_cors)

  @classmethod
  def MessageEntriesToJson(cls, cors_message):
    """Translates an apitools message to CORS JSON."""
    json_text = ''
    # Because CORS is a MessageField, serialize/deserialize as JSON list.
json_text += '[' printed_one = False for cors_entry in cors_message: if printed_one: json_text += ',' else: printed_one = True json_text += encoding.MessageToJson(cors_entry) json_text += ']\n' return json_text def S3MarkerAclFromObjectMetadata(object_metadata): """Retrieves GUID-marked S3 ACL from object metadata, if present. Args: object_metadata: Object metadata to check. Returns: S3 ACL text, if present, None otherwise. """ if (object_metadata and object_metadata.metadata and object_metadata.metadata.additionalProperties): for prop in object_metadata.metadata.additionalProperties: if prop.key == S3_ACL_MARKER_GUID: return prop.value def AddS3MarkerAclToObjectMetadata(object_metadata, acl_text): """Adds a GUID-marked S3 ACL to the object metadata. Args: object_metadata: Object metadata to add the acl to. acl_text: S3 ACL text to add. """ if not object_metadata.metadata: object_metadata.metadata = apitools_messages.Object.MetadataValue() if not object_metadata.metadata.additionalProperties: object_metadata.metadata.additionalProperties = [] object_metadata.metadata.additionalProperties.append( apitools_messages.Object.MetadataValue.AdditionalProperty( key=S3_ACL_MARKER_GUID, value=acl_text)) class AclTranslation(object): """Functions for converting between various ACL formats. This class handles conversion to and from Boto ACL objects, JSON text, and apitools Message objects. """ JSON_TO_XML_ROLES = {'READER': 'READ', 'WRITER': 'WRITE', 'OWNER': 'FULL_CONTROL'} XML_TO_JSON_ROLES = {'READ': 'READER', 'WRITE': 'WRITER', 'FULL_CONTROL': 'OWNER'} @classmethod def BotoAclFromJson(cls, acl_json): acl = ACL() acl.parent = None acl.entries = cls.BotoEntriesFromJson(acl_json, acl) return acl @classmethod # acl_message is a list of messages, either object or bucketaccesscontrol def BotoAclFromMessage(cls, acl_message): acl_dicts = [] for message in acl_message: if message == PRIVATE_DEFAULT_OBJ_ACL: # Sentinel value indicating acl_dicts should be an empty list to create # a private (no entries) default object ACL. 
break acl_dicts.append(encoding.MessageToDict(message)) return cls.BotoAclFromJson(acl_dicts) @classmethod def BotoAclToJson(cls, acl): if hasattr(acl, 'entries'): return cls.BotoEntriesToJson(acl.entries) return [] @classmethod def BotoObjectAclToMessage(cls, acl): for entry in cls.BotoAclToJson(acl): message = encoding.DictToMessage(entry, apitools_messages.ObjectAccessControl) message.kind = u'storage#objectAccessControl' yield message @classmethod def BotoBucketAclToMessage(cls, acl): for entry in cls.BotoAclToJson(acl): message = encoding.DictToMessage(entry, apitools_messages.BucketAccessControl) message.kind = u'storage#bucketAccessControl' yield message @classmethod def BotoEntriesFromJson(cls, acl_json, parent): entries = Entries(parent) entries.parent = parent entries.entry_list = [cls.BotoEntryFromJson(entry_json) for entry_json in acl_json] return entries @classmethod def BotoEntriesToJson(cls, entries): return [cls.BotoEntryToJson(entry) for entry in entries.entry_list] @classmethod def BotoEntryFromJson(cls, entry_json): """Converts a JSON entry into a Boto ACL entry.""" entity = entry_json['entity'] permission = cls.JSON_TO_XML_ROLES[entry_json['role']] if entity.lower() == ALL_USERS.lower(): return Entry(type=ALL_USERS, permission=permission) elif entity.lower() == ALL_AUTHENTICATED_USERS.lower(): return Entry(type=ALL_AUTHENTICATED_USERS, permission=permission) elif entity.startswith('project'): raise CommandException('XML API does not support project scopes, ' 'cannot translate ACL.') elif 'email' in entry_json: if entity.startswith('user'): scope_type = USER_BY_EMAIL elif entity.startswith('group'): scope_type = GROUP_BY_EMAIL return Entry(type=scope_type, email_address=entry_json['email'], permission=permission) elif 'entityId' in entry_json: if entity.startswith('user'): scope_type = USER_BY_ID elif entity.startswith('group'): scope_type = GROUP_BY_ID return Entry(type=scope_type, id=entry_json['entityId'], permission=permission) elif 'domain' in entry_json: if entity.startswith('domain'): scope_type = GROUP_BY_DOMAIN return Entry(type=scope_type, domain=entry_json['domain'], permission=permission) raise CommandException('Failed to translate JSON ACL to XML.') @classmethod def BotoEntryToJson(cls, entry): """Converts a Boto ACL entry to a valid JSON dictionary.""" acl_entry_json = {} # JSON API documentation uses camel case. 
scope_type_lower = entry.scope.type.lower() if scope_type_lower == ALL_USERS.lower(): acl_entry_json['entity'] = 'allUsers' elif scope_type_lower == ALL_AUTHENTICATED_USERS.lower(): acl_entry_json['entity'] = 'allAuthenticatedUsers' elif scope_type_lower == USER_BY_EMAIL.lower(): acl_entry_json['entity'] = 'user-%s' % entry.scope.email_address acl_entry_json['email'] = entry.scope.email_address elif scope_type_lower == USER_BY_ID.lower(): acl_entry_json['entity'] = 'user-%s' % entry.scope.id acl_entry_json['entityId'] = entry.scope.id elif scope_type_lower == GROUP_BY_EMAIL.lower(): acl_entry_json['entity'] = 'group-%s' % entry.scope.email_address acl_entry_json['email'] = entry.scope.email_address elif scope_type_lower == GROUP_BY_ID.lower(): acl_entry_json['entity'] = 'group-%s' % entry.scope.id acl_entry_json['entityId'] = entry.scope.id elif scope_type_lower == GROUP_BY_DOMAIN.lower(): acl_entry_json['entity'] = 'domain-%s' % entry.scope.domain acl_entry_json['domain'] = entry.scope.domain else: raise ArgumentException('ACL contains invalid scope type: %s' % scope_type_lower) acl_entry_json['role'] = cls.XML_TO_JSON_ROLES[entry.permission] return acl_entry_json @classmethod def JsonToMessage(cls, json_data, message_type): """Converts the input JSON data into list of Object/BucketAccessControls. Args: json_data: String of JSON to convert. message_type: Which type of access control entries to return, either ObjectAccessControl or BucketAccessControl. Raises: ArgumentException on invalid JSON data. Returns: List of ObjectAccessControl or BucketAccessControl elements. """ try: deserialized_acl = json.loads(json_data) acl = [] for acl_entry in deserialized_acl: acl.append(encoding.DictToMessage(acl_entry, message_type)) return acl except ValueError: CheckForXmlConfigurationAndRaise('ACL', json_data) @classmethod def JsonFromMessage(cls, acl): """Strips unnecessary fields from an ACL message and returns valid JSON. Args: acl: iterable ObjectAccessControl or BucketAccessControl Returns: ACL JSON string. """ serializable_acl = [] if acl is not None: for acl_entry in acl: if acl_entry.kind == u'storage#objectAccessControl': acl_entry.object = None acl_entry.generation = None acl_entry.kind = None acl_entry.bucket = None acl_entry.id = None acl_entry.selfLink = None acl_entry.etag = None serializable_acl.append(encoding.MessageToDict(acl_entry)) return json.dumps(serializable_acl, sort_keys=True, indent=2, separators=(',', ': '))
KaranToor/MA450
google-cloud-sdk/platform/gsutil/gslib/translation_helper.py
Python
apache-2.0
35,255
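# ---------------------------------------------------------------------------
# Illustrative sketch expanding on translation_helper above (not part of the
# original repository): S3 version ids are modeled as apitools generations by
# hex-encoding the string into a Python 2 long. The round trip performed by
# EncodeStringAsLong and _DecodeLongAsString looks like this (Python 2):
#
#   def encode_string_as_long(s):
#       return long(s.encode('hex'), 16)
#
#   def decode_long_as_string(n):
#       # Strip the '0x' prefix and the trailing 'L' suffix of hex(long).
#       return hex(n)[2:-1].decode('hex')
#
#   version_id = 'abc123'
#   assert decode_long_as_string(encode_string_as_long(version_id)) == version_id
# ---------------------------------------------------------------------------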
"""Sensor platform support for yeelight.""" import logging from homeassistant.components.binary_sensor import BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import DATA_CONFIG_ENTRIES, DATA_DEVICE, DATA_UPDATED, DOMAIN from .entity import YeelightEntity _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ) -> None: """Set up Yeelight from a config entry.""" device = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][config_entry.entry_id][DATA_DEVICE] if device.is_nightlight_supported: _LOGGER.debug("Adding nightlight mode sensor for %s", device.name) async_add_entities([YeelightNightlightModeSensor(device, config_entry)]) class YeelightNightlightModeSensor(YeelightEntity, BinarySensorEntity): """Representation of a Yeelight nightlight mode sensor.""" async def async_added_to_hass(self): """Handle entity which will be added.""" self.async_on_remove( async_dispatcher_connect( self.hass, DATA_UPDATED.format(self._device.host), self.async_write_ha_state, ) ) await super().async_added_to_hass() @property def unique_id(self) -> str: """Return a unique ID.""" return f"{self._unique_id}-nightlight_sensor" @property def name(self): """Return the name of the sensor.""" return f"{self._device.name} nightlight" @property def is_on(self): """Return true if nightlight mode is on.""" return self._device.is_nightlight_enabled
jawilson/home-assistant
homeassistant/components/yeelight/binary_sensor.py
Python
apache-2.0
1,782
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.objects import base
from nova.objects import fields
from nova import rpc


@base.NovaObjectRegistry.register_if(False)
class NotificationObject(base.NovaObject):
    """Base class for every notification related versioned object."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    def __init__(self, **kwargs):
        super(NotificationObject, self).__init__(**kwargs)
        # The notification objects are created on the fly when nova emits the
        # notification. This causes every field of the freshly created object
        # to show up as changed. We don't want to send this meaningless
        # information so we reset the object after creation.
        self.obj_reset_changes(recursive=False)


@base.NovaObjectRegistry.register_notification
class EventType(NotificationObject):
    # Version 1.0: Initial version
    # Version 1.1: New valid actions values are added to the
    #              NotificationActionField enum
    # Version 1.2: DELETE value is added to the NotificationActionField enum
    # Version 1.3: Set of new values are added to NotificationActionField enum
    VERSION = '1.3'

    fields = {
        'object': fields.StringField(nullable=False),
        'action': fields.NotificationActionField(nullable=False),
        'phase': fields.NotificationPhaseField(nullable=True),
    }

    def to_notification_event_type_field(self):
        """Serialize the object to the wire format."""
        s = '%s.%s' % (self.object, self.action)
        if self.obj_attr_is_set('phase'):
            s += '.%s' % self.phase
        return s


@base.NovaObjectRegistry.register_if(False)
class NotificationPayloadBase(NotificationObject):
    """Base class for the payload of versioned notifications."""
    # SCHEMA defines how to populate the payload fields. It is a dictionary
    # where every key value pair has the following format:
    # <payload_field_name>: (<data_source_name>,
    #                        <field_of_the_data_source>)
    # The <payload_field_name> is the name where the data will be stored in
    # the payload object; this field has to be defined as a field of the
    # payload. The <data_source_name> shall refer to the name of the parameter
    # passed as kwarg to the payload's populate_schema() call and this object
    # will be used as the source of the data. The <field_of_the_data_source>
    # shall be a valid field of the passed argument.
    # The SCHEMA needs to be applied with the populate_schema() call before
    # the notification can be emitted.
    # The value of the payload.<payload_field_name> field will be set by the
    # <data_source_name>.<field_of_the_data_source> field. The
    # <data_source_name> will not be part of the payload object internal or
    # external representation.
    # Payload fields that are not set by the SCHEMA can be filled in the same
    # way as in any versioned object.
SCHEMA = {} # Version 1.0: Initial version VERSION = '1.0' def __init__(self, **kwargs): super(NotificationPayloadBase, self).__init__(**kwargs) self.populated = not self.SCHEMA def populate_schema(self, **kwargs): """Populate the object based on the SCHEMA and the source objects :param kwargs: A dict contains the source object at the key defined in the SCHEMA """ for key, (obj, field) in self.SCHEMA.items(): source = kwargs[obj] if source.obj_attr_is_set(field): setattr(self, key, getattr(source, field)) self.populated = True # the schema population will create changed fields but we don't need # this information in the notification self.obj_reset_changes(recursive=False) @base.NovaObjectRegistry.register_notification class NotificationPublisher(NotificationObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': fields.StringField(nullable=False), 'binary': fields.StringField(nullable=False), } @classmethod def from_service_obj(cls, service): return cls(host=service.host, binary=service.binary) @base.NovaObjectRegistry.register_if(False) class NotificationBase(NotificationObject): """Base class for versioned notifications. Every subclass shall define a 'payload' field. """ # Version 1.0: Initial version VERSION = '1.0' fields = { 'priority': fields.NotificationPriorityField(), 'event_type': fields.ObjectField('EventType'), 'publisher': fields.ObjectField('NotificationPublisher'), } def _emit(self, context, event_type, publisher_id, payload): notifier = rpc.get_versioned_notifier(publisher_id) notify = getattr(notifier, self.priority) notify(context, event_type=event_type, payload=payload) def emit(self, context): """Send the notification.""" assert self.payload.populated # Note(gibi): notification payload will be a newly populated object # therefore every field of it will look changed so this does not carry # any extra information so we drop this from the payload. self.payload.obj_reset_changes(recursive=False) self._emit(context, event_type= self.event_type.to_notification_event_type_field(), publisher_id='%s:%s' % (self.publisher.binary, self.publisher.host), payload=self.payload.obj_to_primitive()) def notification_sample(sample): """Class decorator to attach the notification sample information to the notification object for documentation generation purposes. :param sample: the path of the sample json file relative to the doc/notification_samples/ directory in the nova repository root. """ def wrap(cls): if not getattr(cls, 'samples', None): cls.samples = [sample] else: cls.samples.append(sample) return cls return wrap
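# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the SCHEMA mechanism
# above copies fields from source objects into payload attributes. A minimal
# plain-Python miniature of populate_schema(), assuming simple attribute
# objects instead of nova versioned objects:
#
#   class InstancePayload(object):
#       # payload_field_name: (kwarg_name, source_attribute)
#       SCHEMA = {'uuid': ('instance', 'uuid'),
#                 'host': ('instance', 'host')}
#
#       def populate_schema(self, **kwargs):
#           for key, (obj, field) in self.SCHEMA.items():
#               source = kwargs[obj]
#               if hasattr(source, field):
#                   setattr(self, key, getattr(source, field))
#
#   class FakeInstance(object):
#       uuid = 'uuid-1'
#       host = 'compute-1'
#
#   payload = InstancePayload()
#   payload.populate_schema(instance=FakeInstance())
#   print(payload.uuid, payload.host)  # uuid-1 compute-1
# ---------------------------------------------------------------------------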
xuweiliang/Codelibrary
nova/notifications/objects/base.py
Python
apache-2.0
6,689
import itertools import struct import time import pytest import logging from flaky import flaky from cassandra import ConsistencyLevel, InvalidRequest from cassandra.metadata import NetworkTopologyStrategy, SimpleStrategy from cassandra.policies import FallthroughRetryPolicy from cassandra.query import SimpleStatement from dtest import Tester, create_ks from distutils.version import LooseVersion from thrift_bindings.thrift010.ttypes import \ ConsistencyLevel as ThriftConsistencyLevel from thrift_bindings.thrift010.ttypes import (CfDef, Column, ColumnOrSuperColumn, Mutation) from thrift_test import get_thrift_client from tools.assertions import (assert_all, assert_invalid, assert_length_equal, assert_none, assert_one, assert_unavailable) from tools.data import rows_to_list from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper, UpdatingKeyspaceMetadataWrapper, UpdatingTableMetadataWrapper) since = pytest.mark.since logger = logging.getLogger(__name__) class CQLTester(Tester): def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=1, rf=1, protocol_version=None, user=None, password=None, start_rpc=False, **kwargs): cluster = self.cluster if ordered: cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner") if use_cache: cluster.set_configuration_options(values={'row_cache_size_in_mb': 100}) if start_rpc: cluster.set_configuration_options(values={'start_rpc': True}) if user: config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer', 'permissions_validity_in_ms': 0} cluster.set_configuration_options(values=config) if not cluster.nodelist(): cluster.populate(nodes).start(wait_for_binary_proto=True) node1 = cluster.nodelist()[0] session = self.patient_cql_connection(node1, protocol_version=protocol_version, user=user, password=password) if create_keyspace: create_ks(session, 'ks', rf) return session class TestCQL(CQLTester): """ Each CQL statement is exercised at least once in order to ensure we execute the code path in StorageProxy. # TODO This probably isn't true anymore? Note that in depth CQL validation is done in Java unit tests, see CASSANDRA-9160. # TODO I'm not convinced we need these. Seems like all the functionality # is covered in greater detail in other test classes. 
""" def test_keyspace(self): """ Smoke test that basic keyspace operations work: - create a keyspace - assert keyspace exists and is configured as expected with the driver metadata API - ALTER it - assert keyspace was correctly altered with the driver metadata API - DROP it - assert keyspace is no longer in keyspace metadata """ session = self.prepare(create_keyspace=False) meta = UpdatingClusterMetadataWrapper(session.cluster) assert 'ks' not in meta.keyspaces session.execute("CREATE KEYSPACE ks WITH replication = " "{ 'class':'SimpleStrategy', 'replication_factor':1} " "AND DURABLE_WRITES = true") assert 'ks' in meta.keyspaces ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks') assert ks_meta.durable_writes assert isinstance(ks_meta.replication_strategy, SimpleStrategy) session.execute("ALTER KEYSPACE ks WITH replication = " "{ 'class' : 'NetworkTopologyStrategy', 'datacenter1' : 1 } " "AND DURABLE_WRITES = false") assert not ks_meta.durable_writes assert isinstance(ks_meta.replication_strategy, NetworkTopologyStrategy) session.execute("DROP KEYSPACE ks") assert 'ks' not in meta.keyspaces def test_table(self): """ Smoke test that basic table operations work: - create a table - ALTER the table adding a column - insert 10 values - SELECT * and assert the values are there - TRUNCATE the table - SELECT * and assert there are no values - DROP the table - SELECT * and assert the statement raises an InvalidRequest # TODO run SELECTs to make sure each statement works """ session = self.prepare() ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks') session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)") assert 'test1' in ks_meta.tables t1_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test1') session.execute("ALTER TABLE test1 ADD v2 int") assert 'v2' in t1_meta.columns for i in range(0, 10): session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i)) assert_all(session, "SELECT * FROM test1", [[i, i, i] for i in range(0, 10)], ignore_order=True) session.execute("TRUNCATE test1") assert_none(session, "SELECT * FROM test1") session.execute("DROP TABLE test1") assert 'test1' not in ks_meta.tables @since("2.0", max_version="3.X") def test_table_compact_storage(self): """ Smoke test that basic table operations work: - create a table with COMPACT STORAGE - insert 10 values - SELECT * and assert the values are there - TRUNCATE the table - SELECT * and assert there are no values - DROP the table - SELECT * and assert the statement raises an InvalidRequest # TODO run SELECTs to make sure each statement works """ session = self.prepare() ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks') session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE") assert 'test2' in ks_meta.tables for i in range(0, 10): session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=i)) assert_all(session, "SELECT * FROM test2", [[i, i, i] for i in range(0, 10)], ignore_order=True) session.execute("TRUNCATE test2") assert_none(session, "SELECT * FROM test2") session.execute("DROP TABLE test2") assert 'test2' not in ks_meta.tables def test_index(self): """ Smoke test CQL statements related to indexes: - CREATE a table - CREATE an index on that table - INSERT 10 values into the table - SELECT from the table over the indexed value and assert the expected values come back - drop the index - assert SELECTing over the indexed value raises an 
InvalidRequest # TODO run SELECTs to make sure each statement works """ session = self.prepare() session.execute("CREATE TABLE test3 (k int PRIMARY KEY, v1 int, v2 int)") table_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test3') session.execute("CREATE INDEX testidx ON test3 (v1)") assert 'testidx' in table_meta.indexes for i in range(0, 10): session.execute("INSERT INTO test3 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i)) assert_one(session, "SELECT * FROM test3 WHERE v1 = 0", [0, 0, 0]) session.execute("DROP INDEX testidx") assert 'testidx' not in table_meta.indexes def test_type(self): """ Smoke test basic TYPE operations: - CREATE a type - CREATE a table using that type - ALTER the type and CREATE another table - DROP the tables and type - CREATE another table using the DROPped type and assert it fails with an InvalidRequest # TODO run SELECTs to make sure each statement works # TODO is this even necessary given the existence of the auth_tests? """ session = self.prepare() # even though we only ever use the user_types attribute of this object, # we have to access it each time, because attribute access is how the # value is updated ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks') session.execute("CREATE TYPE address_t (street text, city text, zip_code int)") assert 'address_t' in ks_meta.user_types session.execute("CREATE TABLE test4 (id int PRIMARY KEY, address frozen<address_t>)") session.execute("ALTER TYPE address_t ADD phones set<text>") assert 'phones' in ks_meta.user_types['address_t'].field_names # drop the table so we can safely drop the type it uses session.execute("DROP TABLE test4") session.execute("DROP TYPE address_t") assert 'address_t' not in ks_meta.user_types def test_user(self): """ Smoke test for basic USER queries: - get a session as the default superuser - CREATE a user - ALTER that user by giving it a different password - DROP that user # TODO list users after each to make sure each statement works """ session = self.prepare(user='cassandra', password='cassandra') node1 = self.cluster.nodelist()[0] def get_usernames(): return [user.name for user in session.execute('LIST USERS')] assert 'user1' not in get_usernames() session.execute("CREATE USER user1 WITH PASSWORD 'secret'") # use patient to retry until it works, because it takes some time for # the CREATE to take self.patient_cql_connection(node1, user='user1', password='secret') session.execute("ALTER USER user1 WITH PASSWORD 'secret^2'") # use patient for same reason as above self.patient_cql_connection(node1, user='user1', password='secret^2') session.execute("DROP USER user1") assert 'user1' not in get_usernames() def test_statements(self): """ Smoke test SELECT and UPDATE statements: - create a table - insert 20 rows into the table - run SELECT COUNT queries and assert they return the correct values - bare and with IN and equality conditions - run SELECT * queries with = conditions - run UPDATE queries - SELECT * and assert the UPDATEd values are there - DELETE with a = condition - SELECT the deleted values and make sure nothing is returned # TODO run SELECTs to make sure each statement works """ session = self.prepare() session.execute("CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )") for i in range(0, 10): session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})".format(i=i)) session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})".format(i=i)) 
assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [10]) assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')", [20]) assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0", [2]) assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, i, i] for i in range(0, 10)]) assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'", [['ev2', i, i, i] for i in range(0, 10)]) for i in range(0, 10): session.execute("UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}".format(i=i)) assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, 0, 0] for i in range(0, 10)]) session.execute("DELETE FROM test7 WHERE kind = 'ev1'") assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'") assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [0]) @since('3.10') def test_partition_key_allow_filtering(self): """ Filtering with unrestricted parts of partition keys @jira_ticket CASSANDRA-11031 """ session = self.prepare() session.execute(""" CREATE TABLE IF NOT EXISTS test_filter ( k1 int, k2 int, ck1 int, v int, PRIMARY KEY ((k1, k2), ck1) ) """) session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 0, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 1, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 2, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 3, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 0, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 1, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 2, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 3, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 0, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 1, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 2, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 3, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 0, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 1, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 2, 0)") session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 3, 0)") # select test assert_all(session, "SELECT * FROM test_filter WHERE k1 = 0 ALLOW FILTERING", [[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 2, 0], [0, 0, 3, 0], [0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 2, 0], [0, 1, 3, 0]], ignore_order=True) assert_all(session, "SELECT * FROM test_filter WHERE k1 <= 1 AND k2 >= 1 ALLOW FILTERING", [[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 2, 0], [0, 1, 3, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 2, 0], [1, 1, 3, 0]], ignore_order=True) assert_none(session, "SELECT * FROM test_filter WHERE k1 = 2 ALLOW FILTERING") assert_none(session, "SELECT * FROM test_filter WHERE k1 <=0 AND k2 > 1 ALLOW FILTERING") assert_all(session, "SELECT * FROM test_filter WHERE k2 <= 0 ALLOW FILTERING", [[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 2, 0], [0, 0, 3, 0], [1, 0, 0, 0], [1, 0, 1, 0], [1, 0, 2, 0], [1, 0, 3, 0]], ignore_order=True) assert_all(session, "SELECT * FROM test_filter WHERE k1 <= 0 AND k2 = 0 ALLOW FILTERING", [[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 2, 0], [0, 0, 3, 0]]) assert_all(session, "SELECT * FROM test_filter WHERE k2 = 1 ALLOW FILTERING", [[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 
2, 0], [0, 1, 3, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 2, 0], [1, 1, 3, 0]], ignore_order=True) assert_none(session, "SELECT * FROM test_filter WHERE k2 = 2 ALLOW FILTERING") # filtering on both Partition Key and Clustering key assert_all(session, "SELECT * FROM test_filter WHERE k1 = 0 AND ck1=0 ALLOW FILTERING", [[0, 0, 0, 0], [0, 1, 0, 0]], ignore_order=True) assert_all(session, "SELECT * FROM test_filter WHERE k1 = 0 AND k2=1 AND ck1=0 ALLOW FILTERING", [[0, 1, 0, 0]]) # count(*) test assert_all(session, "SELECT count(*) FROM test_filter WHERE k2 = 0 ALLOW FILTERING", [[8]]) assert_all(session, "SELECT count(*) FROM test_filter WHERE k2 = 1 ALLOW FILTERING", [[8]]) assert_all(session, "SELECT count(*) FROM test_filter WHERE k2 = 2 ALLOW FILTERING", [[0]]) # test invalid query with pytest.raises(InvalidRequest): session.execute("SELECT * FROM test_filter WHERE k1 = 0") with pytest.raises(InvalidRequest): session.execute("SELECT * FROM test_filter WHERE k1 = 0 AND k2 > 0") with pytest.raises(InvalidRequest): session.execute("SELECT * FROM test_filter WHERE k1 >= 0 AND k2 in (0,1,2)") with pytest.raises(InvalidRequest): session.execute("SELECT * FROM test_filter WHERE k2 > 0") def test_batch(self): """ Smoke test for BATCH statements: - CREATE a table - create a BATCH statement and execute it at QUORUM # TODO run SELECTs to make sure each statement works """ session = self.prepare() session.execute(""" CREATE TABLE test8 ( userid text PRIMARY KEY, name text, password text ) """) query = SimpleStatement(""" BEGIN BATCH INSERT INTO test8 (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); UPDATE test8 SET password = 'ps22dhds' WHERE userid = 'user3'; INSERT INTO test8 (userid, password) VALUES ('user4', 'ch@ngem3c'); DELETE name FROM test8 WHERE userid = 'user1'; APPLY BATCH; """, consistency_level=ConsistencyLevel.QUORUM) session.execute(query) class TestMiscellaneousCQL(CQLTester): """ CQL tests that cannot be performed as Java unit tests, see CASSANDRA-9160. If you're considering adding a test here, consider writing Java unit tests for CQL validation instead. Add a new test here only if there is a reason for it, e.g. the test is related to the client protocol or thrift, requires examining the log files, or must run on multiple nodes. """ @since('2.1', max_version='3.0') def test_large_collection_errors(self): """ Assert C* logs warnings when selecting too large a collection over protocol v2: - prepare the cluster and connect using protocol v2 - CREATE a table containing a map column - insert over 65535 elements into the map - select all the elements of the map - assert that the correct error was logged """ # We only warn with protocol 2 session = self.prepare(protocol_version=2) cluster = self.cluster node1 = cluster.nodelist()[0] self.fixture_dtest_setup.ignore_log_patterns = ["Detected collection for table"] session.execute(""" CREATE TABLE maps ( userid text PRIMARY KEY, properties map<int, text> ); """) # Insert more than the max, which is 65535 for i in range(70000): session.execute("UPDATE maps SET properties[{}] = 'x' WHERE userid = 'user'".format(i)) # Query for the data and throw exception session.execute("SELECT properties FROM maps WHERE userid = 'user'") node1.watch_log_for("Detected collection for table ks.maps with 70000 elements, more than the 65535 limit. " "Only the first 65535 elements will be returned to the client. 
Please see " "http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.") @since('2.0', max_version='4') def test_cql3_insert_thrift(self): """ Check that we can insert from thrift into a CQL3 table: - CREATE a table via CQL - insert values via thrift - SELECT the inserted values and assert they are there as expected @jira_ticket CASSANDRA-4377 """ session = self.prepare(start_rpc=True) session.execute(""" CREATE TABLE test ( k int, c int, v int, PRIMARY KEY (k, c) ) """) node = self.cluster.nodelist()[0] host, port = node.network_interfaces['thrift'] client = get_thrift_client(host, port) client.transport.open() client.set_keyspace('ks') key = struct.pack('>i', 2) column_name_component = struct.pack('>i', 4) # component length + component + EOC + component length + component + EOC column_name = b'\x00\x04' + column_name_component + b'\x00' + b'\x00\x01' + 'v'.encode("utf-8") + b'\x00' value = struct.pack('>i', 8) client.batch_mutate( {key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}}, ThriftConsistencyLevel.ONE) assert_one(session, "SELECT * FROM test", [2, 4, 8]) @since('2.0', max_version='4') def test_rename(self): """ Check that a thrift-created table can be renamed via CQL: - create a table via the thrift interface - INSERT a row via CQL - ALTER the name of the table via CQL - SELECT from the table and assert the values inserted are there """ session = self.prepare(start_rpc=True) node = self.cluster.nodelist()[0] host, port = node.network_interfaces['thrift'] client = get_thrift_client(host, port) client.transport.open() cfdef = CfDef() cfdef.keyspace = 'ks' cfdef.name = 'test' cfdef.column_type = 'Standard' cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)' cfdef.key_validation_class = 'UTF8Type' cfdef.default_validation_class = 'UTF8Type' client.set_keyspace('ks') client.system_add_column_family(cfdef) session.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')") session.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3") assert_one(session, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2]) def test_invalid_string_literals(self): """ @jira_ticket CASSANDRA-8101 - assert INSERTing into a nonexistent table fails normally, with an InvalidRequest exception - create a table with ascii and text columns - assert that trying to execute an insert statement with non-UTF8 contents raises a ProtocolException - tries to insert into a nonexistent column to make sure the ProtocolException is raised over other errors """ session = self.prepare() # this should fail as normal, not with a ProtocolException assert_invalid(session, "insert into invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')") session = self.patient_cql_connection(self.cluster.nodelist()[0], keyspace='ks') session.execute("create table invalid_string_literals (k int primary key, a ascii, b text)") # this should still fail with an InvalidRequest assert_invalid(session, "insert into invalid_string_literals (k, c) VALUES (0, '\u038E\u0394\u03B4\u03E0')") # try to insert utf-8 characters into an ascii column and make sure it fails with pytest.raises(InvalidRequest, match='Invalid ASCII character in string literal'): session.execute("insert into invalid_string_literals (k, a) VALUES (0, '\xE0\x80\x80')") def test_prepared_statement_invalidation(self): """ @jira_ticket CASSANDRA-7910 - CREATE a table and INSERT a row - prepare 2 prepared 
SELECT statements - SELECT the row with a bound prepared statement and assert it returns the expected row - ALTER the table, dropping a column - assert prepared statement without that column in it still works - assert prepared statement containing that column fails - ALTER the table, adding a column - assert prepared statement without that column in it still works - assert prepared statement containing that column also still works - ALTER the table, changing the type of a column - assert that both prepared statements still work """ session = self.prepare() session.execute("CREATE TABLE test (k int PRIMARY KEY, a int, b int, c int)") session.execute("INSERT INTO test (k, a, b, c) VALUES (0, 0, 0, 0)") wildcard_prepared = session.prepare("SELECT * FROM test") explicit_prepared = session.prepare("SELECT k, a, b, c FROM test") result = session.execute(wildcard_prepared.bind(None)) assert list(result) == [(0, 0, 0, 0)] session.execute("ALTER TABLE test DROP c") result = session.execute(wildcard_prepared.bind(None)) # wildcard select can be automatically re-prepared by the driver assert list(result) == [(0, 0, 0)] # but re-preparing the statement with explicit columns should fail # (see PYTHON-207 for why we expect InvalidRequestException instead of the normal exc) assert_invalid(session, explicit_prepared.bind(None), expected=InvalidRequest) session.execute("ALTER TABLE test ADD d int") result = session.execute(wildcard_prepared.bind(None)) assert list(result) == [(0, 0, 0, None)] if self.cluster.version() < LooseVersion('3.0'): explicit_prepared = session.prepare("SELECT k, a, b, d FROM test") # when the type is altered, both statements will need to be re-prepared # by the driver, but the re-preparation should succeed session.execute("ALTER TABLE test ALTER d TYPE blob") result = session.execute(wildcard_prepared.bind(None)) assert list(result) == [(0, 0, 0, None)] result = session.execute(explicit_prepared.bind(None)) assert list(result) == [(0, 0, 0, None)] def test_range_slice(self): """ Regression test for CASSANDRA-1337: - CREATE a table - INSERT 2 rows - SELECT * from the table - assert 2 rows were returned @jira_ticket CASSANDRA-1337 # TODO I don't see how this is an interesting test or how it tests 1337. """ cluster = self.cluster cluster.populate(2).start() node1 = cluster.nodelist()[0] time.sleep(0.2) session = self.patient_cql_connection(node1) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test ( k text PRIMARY KEY, v int ); """) time.sleep(1) session.execute("INSERT INTO test (k, v) VALUES ('foo', 0)") session.execute("INSERT INTO test (k, v) VALUES ('bar', 1)") res = list(session.execute("SELECT * FROM test")) assert len(res) == 2, res @pytest.mark.skip(reason="Skipping until PYTHON-893 is fixed") def test_many_columns(self): """ Test for tables with thousands of columns. For CASSANDRA-11621. """ session = self.prepare() width = 5000 cluster = self.cluster session.execute("CREATE TABLE very_wide_table (pk int PRIMARY KEY, " + ",".join(["c_{} int".format(i) for i in range(width)]) + ")") session.execute("INSERT INTO very_wide_table (pk, " + ",".join(["c_{}".format(i) for i in range(width)]) + ") VALUES (100," + ",".join([str(i) for i in range(width)]) + ")") assert_all(session, "SELECT " + ",".join(["c_{}".format(i) for i in range(width)]) + " FROM very_wide_table", [[i for i in range(width)]]) @since("3.11", max_version="3.X") def test_drop_compact_storage_flag(self): """ Test for CASSANDRA-10857, verifying the schema change distribution across the other nodes.
""" cluster = self.cluster cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() session1 = self.patient_cql_connection(node1) session2 = self.patient_cql_connection(node2) session3 = self.patient_cql_connection(node3) create_ks(session1, 'ks', 3) sessions = [session1, session2, session3] for session in sessions: session.set_keyspace('ks') session1.execute(""" CREATE TABLE test_drop_compact_storage (k int PRIMARY KEY, s1 int) WITH COMPACT STORAGE; """) session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (1,1)") session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (2,2)") session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (3,3)") for session in sessions: res = session.execute("SELECT * from test_drop_compact_storage") assert rows_to_list(res) == [[1, 1], [2, 2], [3, 3]] session1.execute("ALTER TABLE test_drop_compact_storage DROP COMPACT STORAGE") for session in sessions: assert_all(session, "SELECT * from test_drop_compact_storage", [[1, None, 1, None], [2, None, 2, None], [3, None, 3, None]]) @since('3.2') class AbortedQueryTester(CQLTester): """ @jira_ticket CASSANDRA-7392 Test that read-queries that take longer than read_request_timeout_in_ms time out. # TODO The important part of these is "set up a combination of # configuration options that will make all reads time out, then # try to read and assert it times out". This can probably be made much # simpler -- most of the logic can be factored out. In many cases it # probably isn't even necessary to define a custom table or to insert # more than one value. """ def test_local_query(self): """ Check that a query running on the local coordinator node times out: - set the read request timeouts to 1 second - start the cluster with read_iteration_delay set to 5 ms - the delay will be applied ot each row iterated and will cause read queries to take longer than the read timeout - CREATE and INSERT into a table - SELECT * from the table using a retry policy that never retries, and assert it times out @jira_ticket CASSANDRA-7392 """ cluster = self.cluster cluster.set_configuration_options(values={'request_timeout_in_ms': 1000, 'read_request_timeout_in_ms': 1000, 'range_request_timeout_in_ms': 1000}) # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds every # CQL row iterated for non system queries, so that these queries take much longer to complete, # see ReadCommand.withStateTracking() cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=5"]) node = cluster.nodelist()[0] session = self.patient_cql_connection(node) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test1 ( id int PRIMARY KEY, val text ); """) for i in range(500): session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i)) # use debug logs because at info level no-spam logger has unpredictable results mark = node.mark_log(filename='debug.log') statement = SimpleStatement("SELECT * from test1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=120) def test_remote_query(self): """ Check that a query running on a node other than the coordinator times out: - populate the cluster with 2 nodes - 
set the read request timeouts to 1 second - start one node without having it join the ring - start the other node with read_iteration_delay set to 5 ms - the delay will be applied ot each row iterated and will cause read queries to take longer than the read timeout - CREATE a table - INSERT 5000 rows on a session on the node that is not a member of the ring - run SELECT statements and assert they fail # TODO refactor SELECT statements: # - run the statements in a loop to reduce duplication # - watch the log after each query # - assert we raise the right error """ cluster = self.cluster cluster.set_configuration_options(values={'request_timeout_in_ms': 1000, 'read_request_timeout_in_ms': 1000, 'range_request_timeout_in_ms': 1000}) cluster.populate(2) node1, node2 = cluster.nodelist() node1.start(wait_for_binary_proto=True, join_ring=False) # ensure other node executes queries node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=5"]) # see above for explanation session = self.patient_exclusive_cql_connection(node1) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test2 ( id int, col int, val text, PRIMARY KEY(id, col) ); """) for i, j in itertools.product(list(range(10)), list(range(500))): session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j)) # use debug logs because at info level no-spam logger has unpredictable results mark = node2.mark_log(filename='debug.log') statement = SimpleStatement("SELECT * from test2", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) statement = SimpleStatement("SELECT * from test2 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) statement = SimpleStatement("SELECT * from test2 where id IN (1, 2, 3) AND col > 10", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60) def test_index_query(self): """ Check that a secondary index query times out: - populate a 1-node cluster - set the read request timeouts to 1 second - start one node without having it join the ring - start the other node with read_iteration_delay set to 5 ms - the delay will be applied ot each row iterated and will cause read queries to take longer than the read timeout - CREATE a table - CREATE an index on the table - INSERT 500 values into the table - SELECT over the table and assert it times out """ cluster = self.cluster cluster.set_configuration_options(values={'request_timeout_in_ms': 1000, 'read_request_timeout_in_ms': 1000, 'range_request_timeout_in_ms': 1000}) cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=5"]) # see above for explanation node = cluster.nodelist()[0] session = self.patient_cql_connection(node) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test3 ( id int PRIMARY KEY, col int, val text 
); """) session.execute("CREATE INDEX ON test3 (col)") for i in range(500): session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, 50, 'foo')".format(i)) # use debug logs because at info level no-spam logger has unpredictable results mark = node.mark_log(filename='debug.log') statement = session.prepare("SELECT * from test3 WHERE col = ? ALLOW FILTERING") statement.consistency_level = ConsistencyLevel.ONE statement.retry_policy = FallthroughRetryPolicy() assert_unavailable(lambda c: logger.debug(c.execute(statement, [50])), session) node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=120) def test_materialized_view(self): """ Check that a materialized view query times out: - populate a 2-node cluster - set the read request timeouts to 1 second - start one node without having it join the ring - start the other node with read_iteration_delay set to 5 ms - the delay will be applied ot each row iterated and will cause read queries to take longer than the read timeout - CREATE a table - INSERT 500 values into that table - CREATE a materialized view over that table - assert querying that table results in an unavailable exception """ cluster = self.cluster cluster.set_configuration_options(values={'request_timeout_in_ms': 1000, 'read_request_timeout_in_ms': 1000, 'range_request_timeout_in_ms': 1000}) cluster.populate(2) node1, node2 = cluster.nodelist() node1.start(wait_for_binary_proto=True, join_ring=False) # ensure other node executes queries node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=5"]) # see above for explanation session = self.patient_exclusive_cql_connection(node1) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test4 ( id int PRIMARY KEY, col int, val text ); """) session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 " "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)")) for i in range(500): session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, 50, 'foo')".format(i)) # use debug logs because at info level no-spam logger has unpredictable results mark = node2.mark_log(filename='debug.log') statement = SimpleStatement("SELECT * FROM mv WHERE col = 50", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy()) assert_unavailable(lambda c: logger.debug(c.execute(statement)), session) node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60) @since('3.10') class TestCQLSlowQuery(CQLTester): """ Test slow query logging. 
@jira_ticket CASSANDRA-12403 """ def test_local_query(self): """ Check that a query running locally on the coordinator is reported as slow: - start a one node cluster with slow_query_log_timeout_in_ms set to a small value and the read request timeouts set to a large value (to ensure the query is not aborted) and read_iteration_delay set to a value big enough for the query to exceed slow_query_log_timeout_in_ms (this will cause read queries to take longer than the slow query timeout) - CREATE and INSERT into a table - SELECT * from the table using a retry policy that never retries, and check that the slow query log messages are present in the debug logs (we cannot check the logs at info level because the no spam logger has unpredictable results) @jira_ticket CASSANDRA-12403 """ cluster = self.cluster cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10, 'request_timeout_in_ms': 120000, 'read_request_timeout_in_ms': 120000, 'range_request_timeout_in_ms': 120000}) # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each # iteration of non system queries, so that these queries take much longer to complete, # see ReadCommand.withStateTracking() cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=1"]) node = cluster.nodelist()[0] session = self.patient_cql_connection(node) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test1 ( id int, col int, val text, PRIMARY KEY(id, col) ); """) for i in range(100): session.execute("INSERT INTO test1 (id, col, val) VALUES (1, {}, 'foo')".format(i)) # only check debug logs because at INFO level the no-spam logger has unpredictable results mark = node.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"], from_mark=mark, filename='debug.log', timeout=60) mark = node.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test1 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"], from_mark=mark, filename='debug.log', timeout=60) mark = node.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test1 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"], from_mark=mark, filename='debug.log', timeout=60) mark = node.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test1 where token(id) < 0", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"], from_mark=mark, filename='debug.log', timeout=60) def test_remote_query(self): """ Check that a query running on a node other than the coordinator is reported as slow: - populate the cluster with 2 nodes - start one node without having it join the ring - start the other one node with slow_query_log_timeout_in_ms set to a small value and the read request timeouts set to a large value (to ensure the query is not aborted) and read_iteration_delay set to a value big enough for the query to exceed 
slow_query_log_timeout_in_ms (this will cause read queries to take longer than the slow query timeout) - CREATE a table - INSERT 5000 rows on a session on the node that is not a member of the ring - run SELECT statements and check that the slow query messages are present in the debug logs (we cannot check the logs at info level because the no spam logger has unpredictable results) @jira_ticket CASSANDRA-12403 """ cluster = self.cluster cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10, 'request_timeout_in_ms': 120000, 'read_request_timeout_in_ms': 120000, 'range_request_timeout_in_ms': 120000}) cluster.populate(2) node1, node2 = cluster.nodelist() node1.start(wait_for_binary_proto=True, join_ring=False) # ensure other node executes queries node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=1"]) # see above for explanation session = self.patient_exclusive_cql_connection(node1) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test2 ( id int, col int, val text, PRIMARY KEY(id, col) ); """) for i, j in itertools.product(list(range(100)), list(range(10))): session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j)) # only check debug logs because at INFO level the no-spam logger has unpredictable results mark = node2.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test2", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"], from_mark=mark, filename='debug.log', timeout=60) mark = node2.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test2 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"], from_mark=mark, filename='debug.log', timeout=60) mark = node2.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test2 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"], from_mark=mark, filename='debug.log', timeout=60) mark = node2.mark_log(filename='debug.log') session.execute(SimpleStatement("SELECT * from test2 where token(id) < 0", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"], from_mark=mark, filename='debug.log', timeout=60) def test_disable_slow_query_log(self): """ Check that a query is NOT reported as slow if slow query logging is disabled. 
- start a one node cluster with slow_query_log_timeout_in_ms set to 0 milliseconds (this will disable slow query logging), the read request timeouts set to a large value (to ensure queries are not aborted) and read_iteration_delay set to a small value (this will cause read queries to take longer than usual) - CREATE and INSERT into a table - SELECT * from the table using a retry policy that never retries, and check that no slow query log messages appear in the logs @jira_ticket CASSANDRA-12403 """ cluster = self.cluster cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 0, 'request_timeout_in_ms': 120000, 'read_request_timeout_in_ms': 120000, 'range_request_timeout_in_ms': 120000}) # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each # iteration of non system queries, so that these queries take much longer to complete, # see ReadCommand.withStateTracking() cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_report_interval_ms=10", "-Dcassandra.test.read_iteration_delay_ms=1"]) node = cluster.nodelist()[0] session = self.patient_cql_connection(node) create_ks(session, 'ks', 1) session.execute(""" CREATE TABLE test3 ( id int PRIMARY KEY, val text ); """) for i in range(100): session.execute("INSERT INTO test3 (id, val) VALUES ({}, 'foo')".format(i)) session.execute(SimpleStatement("SELECT * from test3", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())) time.sleep(1)  # do our best to ensure logs had a chance to appear self._check_logs(node, "SELECT \* FROM ks.test3", 'debug.log', 0) def _check_logs(self, node, pattern, filename, num_expected): ret = node.grep_log(pattern, filename=filename) assert_length_equal(ret, num_expected) class TestLWTWithCQL(Tester): """ Validate CQL queries for LWTs on static columns with null and non-existing rows @jira_ticket CASSANDRA-9842 """ @pytest.fixture(scope='function', autouse=True) def fixture_post_initialize_cluster(self, fixture_dtest_setup): cluster = fixture_dtest_setup.cluster cluster.populate(3) cluster.start(wait_for_binary_proto=True) def get_lwttester_session(self): node1 = self.cluster.nodelist()[0] session = self.patient_cql_connection(node1) session.execute("""CREATE KEYSPACE IF NOT EXISTS ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':1}""") session.execute("USE ks") return session def test_lwt_with_static_columns(self): session = self.get_lwttester_session() session.execute(""" CREATE TABLE lwt_with_static (a int, b int, s int static, d text, PRIMARY KEY (a, b)) """) assert_one(session, "UPDATE lwt_with_static SET s = 1 WHERE a = 1 IF s = NULL", [True]) assert_one(session, "SELECT * FROM lwt_with_static", [1, None, 1, None]) assert_one(session, "UPDATE lwt_with_static SET s = 2 WHERE a = 2 IF EXISTS", [False]) assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 1", [1, None, 1, None]) assert_one(session, "INSERT INTO lwt_with_static (a, s) VALUES (2, 2) IF NOT EXISTS", [True]) assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 2", [2, None, 2, None]) assert_one(session, "BEGIN BATCH\n" + "INSERT INTO lwt_with_static (a, b, d) values (3, 3, 'a');\n" + "UPDATE lwt_with_static SET s = 3 WHERE a = 3 IF s = null;\n" + "APPLY BATCH;", [True]) assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 3", [3, 3, 3, "a"]) # LWT applies before INSERT assert_one(session, "BEGIN BATCH\n" + "INSERT
INTO lwt_with_static (a, b, d) values (4, 4, 'a');\n" + "UPDATE lwt_with_static SET s = 4 WHERE a = 4 IF s = null;\n" + "APPLY BATCH;", [True]) assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 4", [4, 4, 4, "a"]) def _validate_non_existing_or_null_values(self, table_name, session): assert_one(session, "UPDATE {} SET s = 1 WHERE a = 1 IF s = NULL".format(table_name), [True]) assert_one(session, "SELECT a, s, d FROM {} WHERE a = 1".format(table_name), [1, 1, None]) assert_one(session, "UPDATE {} SET s = 2 WHERE a = 2 IF s IN (10,20,NULL)".format(table_name), [True]) assert_one(session, "SELECT a, s, d FROM {} WHERE a = 2".format(table_name), [2, 2, None]) assert_one(session, "UPDATE {} SET s = 4 WHERE a = 4 IF s != 4".format(table_name), [True]) assert_one(session, "SELECT a, s, d FROM {} WHERE a = 4".format(table_name), [4, 4, None]) def _is_new_lwt_format_version(self, version): return version > LooseVersion('3.9') or (version > LooseVersion('3.0.9') and version < LooseVersion('3.1')) @flaky def test_conditional_updates_on_static_columns_with_null_values(self): session = self.get_lwttester_session() table_name = "conditional_updates_on_static_columns_with_null" session.execute(""" CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b)) """.format(table_name)) for i in range(1, 6): session.execute("INSERT INTO {} (a, b) VALUES ({}, {})".format(table_name, i, i)) self._validate_non_existing_or_null_values(table_name, session) assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False]) assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None]) for operator in [">", "<", ">=", "<=", "="]: assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False]) assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None]) def test_conditional_updates_on_static_columns_with_non_existing_values(self): session = self.get_lwttester_session() table_name = "conditional_updates_on_static_columns_with_ne" session.execute(""" CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b)) """.format(table_name)) self._validate_non_existing_or_null_values(table_name, session) assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False]) assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name)) for operator in [">", "<", ">=", "<=", "="]: assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False]) assert_none(session, "SELECT * FROM {} WHERE a = 5".format(table_name)) def _validate_non_existing_or_null_values_batch(self, table_name, session): assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, d) values (2, 2, 'a'); UPDATE {table_name} SET s = 2 WHERE a = 2 IF s = null; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 2".format(table_name=table_name), [2, 2, 2, "a"]) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s, d) values (4, 4, 4, 'a') UPDATE {table_name} SET s = 5 WHERE a = 4 IF s = null; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 4".format(table_name=table_name), [4, 4, 5, "a"]) assert_one(session, """ BEGIN 
BATCH INSERT INTO {table_name} (a, b, s, d) values (5, 5, 5, 'a') UPDATE {table_name} SET s = 6 WHERE a = 5 IF s IN (1,2,null) APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 5".format(table_name=table_name), [5, 5, 6, "a"]) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s, d) values (7, 7, 7, 'a') UPDATE {table_name} SET s = 8 WHERE a = 7 IF s != 7; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 7".format(table_name=table_name), [7, 7, 8, "a"]) def test_conditional_updates_on_static_columns_with_null_values_batch(self): session = self.get_lwttester_session() table_name = "lwt_on_static_columns_with_null_batch" session.execute(""" CREATE TABLE {table_name} (a int, b int, s int static, d text, PRIMARY KEY (a, b)) """.format(table_name=table_name)) for i in range(1, 7): session.execute("INSERT INTO {table_name} (a, b) VALUES ({i}, {i})".format(table_name=table_name, i=i)) self._validate_non_existing_or_null_values_batch(table_name, session) for operator in [">", "<", ">=", "<=", "="]: assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s, d) values (3, 3, 40, 'a') UPDATE {table_name} SET s = 30 WHERE a = 3 IF s {operator} 5; APPLY BATCH""".format(table_name=table_name, operator=operator), [False, 3, 3, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 3".format(table_name=table_name), [3, 3, None, None]) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s, d) values (6, 6, 70, 'a') UPDATE {table_name} SET s = 60 WHERE a = 6 IF s IN (1,2,3) APPLY BATCH""".format(table_name=table_name), [False, 6, 6, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False]) assert_one(session, "SELECT * FROM {table_name} WHERE a = 6".format(table_name=table_name), [6, 6, None, None]) def test_conditional_deletes_on_static_columns_with_null_values(self): session = self.get_lwttester_session() table_name = "conditional_deletes_on_static_with_null" session.execute(""" CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b)) """.format(table_name)) for i in range(1, 6): session.execute("INSERT INTO {} (a, b, s1, s2, v) VALUES ({}, {}, {}, null, {})".format(table_name, i, i, i, i)) assert_one(session, "DELETE s1 FROM {} WHERE a = 1 IF s2 = null".format(table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 1".format(table_name), [1, 1, None, None, 1]) assert_one(session, "DELETE s1 FROM {} WHERE a = 2 IF s2 IN (10,20,30)".format(table_name), [False, None]) assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, 2, None, 2]) assert_one(session, "DELETE s1 FROM {} WHERE a = 3 IF s2 IN (null,20,30)".format(table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None, 3]) assert_one(session, "DELETE s1 FROM {} WHERE a = 4 IF s2 != 4".format(table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4]) for operator in [">", "<", ">=", "<=", "="]: assert_one(session, "DELETE s1 FROM {} WHERE a = 5 IF s2 {} 3".format(table_name, operator), [False, None]) assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, 5, None, 5]) def test_conditional_deletes_on_static_columns_with_null_values_batch(self): session = self.get_lwttester_session() table_name = 
"conditional_deletes_on_static_with_null_batch" session.execute(""" CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b)) """.format(table_name)) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) values (2, 2, 2, 2); DELETE s1 FROM {table_name} WHERE a = 2 IF s2 = null; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, None, None, 2]) for operator in [">", "<", ">=", "<=", "="]: assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) values (3, 3, 3, 3); DELETE s1 FROM {table_name} WHERE a = 3 IF s2 {operator} 5; APPLY BATCH""".format(table_name=table_name, operator=operator), [False]) assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name)) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) values (6, 6, 6, 6); DELETE s1 FROM {table_name} WHERE a = 6 IF s2 IN (1,2,3); APPLY BATCH""".format(table_name=table_name), [False]) assert_none(session, "SELECT * FROM {} WHERE a = 6".format(table_name)) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) values (4, 4, 4, 4); DELETE s1 FROM {table_name} WHERE a = 4 IF s2 = null; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4]) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) VALUES (5, 5, 5, 5); DELETE s1 FROM {table_name} WHERE a = 5 IF s1 IN (1,2,null); APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None, 5]) assert_one(session, """ BEGIN BATCH INSERT INTO {table_name} (a, b, s1, v) values (7, 7, 7, 7); DELETE s1 FROM {table_name} WHERE a = 7 IF s2 != 7; APPLY BATCH""".format(table_name=table_name), [True]) assert_one(session, "SELECT * FROM {} WHERE a = 7".format(table_name), [7, 7, None, None, 7]) def lwt_with_empty_resultset(self): """ LWT with unset row. @jira_ticket CASSANDRA-12694 """ session = self.get_lwttester_session() session.execute(""" CREATE TABLE test (pk text, v1 int, v2 text, PRIMARY KEY (pk)); """) session.execute("update test set v1 = 100 where pk = 'test1';") node1 = self.cluster.nodelist()[0] self.cluster.flush() assert_one(session, "UPDATE test SET v1 = 100 WHERE pk = 'test1' IF v2 = null;", [True])
beobal/cassandra-dtest
cql_test.py
Python
apache-2.0
66,309
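The LWT assertions above all key off the [applied] column that Cassandra returns as the first column of a conditional statement's result row. A minimal standalone sketch of that pattern, assuming a local node on 127.0.0.1, the DataStax python driver, and an illustrative `demo` keyspace (not part of the tests above):

from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.execute("CREATE KEYSPACE IF NOT EXISTS demo WITH replication = "
                "{'class': 'SimpleStrategy', 'replication_factor': 1}")
session.execute("CREATE TABLE IF NOT EXISTS demo.t (a int PRIMARY KEY, s int)")

# A conditional update returns a row whose first column is the [applied]
# flag that assert_one checks for [True]/[False] in the tests above.
row = session.execute("UPDATE demo.t SET s = 1 WHERE a = 1 IF s = null").one()
print(row[0])  # True on first execution, False once s is already set
cluster.shutdown()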
# -*- coding: utf-8 -*-
"""
    flask.ext.security.datastore
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module contains the user datastore classes.

    :copyright: (c) 2012 by Matt Wright.
    :license: MIT, see LICENSE for more details.
"""

from .utils import get_identity_attributes, string_types


class Datastore(object):
    def __init__(self, db):
        self.db = db

    def commit(self):
        pass

    def put(self, model):
        raise NotImplementedError

    def delete(self, model):
        raise NotImplementedError


class SQLAlchemyDatastore(Datastore):
    def commit(self):
        self.db.session.commit()

    def put(self, model):
        self.db.session.add(model)
        return model

    def delete(self, model):
        self.db.session.delete(model)


class MongoEngineDatastore(Datastore):
    def put(self, model):
        model.save()
        return model

    def delete(self, model):
        model.delete()


class PeeweeDatastore(Datastore):
    def put(self, model):
        model.save()
        return model

    def delete(self, model):
        model.delete_instance()


class UserDatastore(object):
    """Abstracted user datastore.

    :param user_model: A user model class definition
    :param role_model: A role model class definition
    """

    def __init__(self, user_model, role_model):
        self.user_model = user_model
        self.role_model = role_model

    def _prepare_role_modify_args(self, user, role):
        if isinstance(user, string_types):
            user = self.find_user(email=user)
        if isinstance(role, string_types):
            role = self.find_role(role)
        return user, role

    def _prepare_create_user_args(self, **kwargs):
        kwargs.setdefault('active', True)
        roles = kwargs.get('roles', [])
        for i, role in enumerate(roles):
            rn = role.name if isinstance(role, self.role_model) else role
            # see if the role exists
            roles[i] = self.find_role(rn)
        kwargs['roles'] = roles
        return kwargs

    def get_user(self, id_or_email):
        """Returns a user matching the specified ID or email address"""
        raise NotImplementedError

    def find_user(self, *args, **kwargs):
        """Returns a user matching the provided parameters."""
        raise NotImplementedError

    def find_role(self, *args, **kwargs):
        """Returns a role matching the provided name."""
        raise NotImplementedError

    def add_role_to_user(self, user, role):
        """Adds a role to a user

        :param user: The user to manipulate
        :param role: The role to add to the user
        """
        user, role = self._prepare_role_modify_args(user, role)
        if role not in user.roles:
            user.roles.append(role)
            self.put(user)
            return True
        return False

    def remove_role_from_user(self, user, role):
        """Removes a role from a user

        :param user: The user to manipulate
        :param role: The role to remove from the user
        """
        rv = False
        user, role = self._prepare_role_modify_args(user, role)
        if role in user.roles:
            rv = True
            user.roles.remove(role)
        return rv

    def toggle_active(self, user):
        """Toggles a user's active status. Always returns True."""
        user.active = not user.active
        return True

    def deactivate_user(self, user):
        """Deactivates a specified user. Returns `True` if a change was made.

        :param user: The user to deactivate
        """
        if user.active:
            user.active = False
            return True
        return False

    def activate_user(self, user):
        """Activates a specified user. Returns `True` if a change was made.

        :param user: The user to activate
        """
        if not user.active:
            user.active = True
            return True
        return False

    def create_role(self, **kwargs):
        """Creates and returns a new role from the given parameters."""
        role = self.role_model(**kwargs)
        return self.put(role)

    def find_or_create_role(self, name, **kwargs):
        """Returns a role matching the given name or creates it with any
        additionally provided parameters.
        """
        kwargs["name"] = name
        return self.find_role(name) or self.create_role(**kwargs)

    def create_user(self, **kwargs):
        """Creates and returns a new user from the given parameters."""
        kwargs = self._prepare_create_user_args(**kwargs)
        user = self.user_model(**kwargs)
        return self.put(user)

    def delete_user(self, user):
        """Deletes the specified user.

        :param user: The user to delete
        """
        self.delete(user)


class SQLAlchemyUserDatastore(SQLAlchemyDatastore, UserDatastore):
    """A SQLAlchemy datastore implementation for Flask-Security that assumes
    the use of the Flask-SQLAlchemy extension.
    """
    def __init__(self, db, user_model, role_model):
        SQLAlchemyDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)

    def get_user(self, identifier):
        if self._is_numeric(identifier):
            return self.user_model.query.get(identifier)
        for attr in get_identity_attributes():
            query = getattr(self.user_model, attr).ilike(identifier)
            rv = self.user_model.query.filter(query).first()
            if rv is not None:
                return rv

    def _is_numeric(self, value):
        try:
            int(value)
        except ValueError:
            return False
        return True

    def find_user(self, **kwargs):
        return self.user_model.query.filter_by(**kwargs).first()

    def find_role(self, role):
        return self.role_model.query.filter_by(name=role).first()


class MongoEngineUserDatastore(MongoEngineDatastore, UserDatastore):
    """A MongoEngine datastore implementation for Flask-Security that assumes
    the use of the Flask-MongoEngine extension.
    """
    def __init__(self, db, user_model, role_model):
        MongoEngineDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)

    def get_user(self, identifier):
        from mongoengine import ValidationError
        try:
            return self.user_model.objects(id=identifier).first()
        except ValidationError:
            pass
        for attr in get_identity_attributes():
            query_key = '%s__iexact' % attr
            query = {query_key: identifier}
            rv = self.user_model.objects(**query).first()
            if rv is not None:
                return rv

    def find_user(self, **kwargs):
        try:
            from mongoengine.queryset import Q, QCombination
        except ImportError:
            from mongoengine.queryset.visitor import Q, QCombination
        from mongoengine.errors import ValidationError

        queries = map(lambda i: Q(**{i[0]: i[1]}), kwargs.items())
        query = QCombination(QCombination.AND, queries)
        try:
            return self.user_model.objects(query).first()
        except ValidationError:
            return None

    def find_role(self, role):
        return self.role_model.objects(name=role).first()

    def add_role_to_user(self, user, role):
        rv = super(MongoEngineUserDatastore, self).add_role_to_user(user, role)
        if rv:
            self.put(user)
        return rv


class PeeweeUserDatastore(PeeweeDatastore, UserDatastore):
    """A Peewee datastore implementation for Flask-Security that assumes
    the use of the Flask-Peewee extension.

    :param user_model: A user model class definition
    :param role_model: A role model class definition
    :param role_link: A model implementing the many-to-many user-role relation
    """
    def __init__(self, db, user_model, role_model, role_link):
        PeeweeDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
        self.UserRole = role_link

    def get_user(self, identifier):
        try:
            return self.user_model.get(self.user_model.id == identifier)
        except ValueError:
            pass
        for attr in get_identity_attributes():
            column = getattr(self.user_model, attr)
            try:
                return self.user_model.get(column ** identifier)
            except self.user_model.DoesNotExist:
                pass

    def find_user(self, **kwargs):
        try:
            return self.user_model.filter(**kwargs).get()
        except self.user_model.DoesNotExist:
            return None

    def find_role(self, role):
        try:
            return self.role_model.filter(name=role).get()
        except self.role_model.DoesNotExist:
            return None

    def create_user(self, **kwargs):
        """Creates and returns a new user from the given parameters."""
        roles = kwargs.pop('roles', [])
        user = self.user_model(**self._prepare_create_user_args(**kwargs))
        user = self.put(user)
        for role in roles:
            self.add_role_to_user(user, role)
        return user

    def add_role_to_user(self, user, role):
        """Adds a role to a user

        :param user: The user to manipulate
        :param role: The role to add to the user
        """
        user, role = self._prepare_role_modify_args(user, role)
        result = self.UserRole.select() \
            .where(self.UserRole.user == user.id, self.UserRole.role == role.id)
        if result.count():
            return False
        else:
            self.UserRole.create(user=user.id, role=role.id)
            return True

    def remove_role_from_user(self, user, role):
        """Removes a role from a user

        :param user: The user to manipulate
        :param role: The role to remove from the user
        """
        user, role = self._prepare_role_modify_args(user, role)
        result = self.UserRole.select() \
            .where(self.UserRole.user == user, self.UserRole.role == role)
        if result.count():
            query = self.UserRole.delete().where(
                self.UserRole.user == user, self.UserRole.role == role)
            query.execute()
            return True
        else:
            return False
davidvon/pipa-pay-server
site-packages/flask_security/datastore.py
Python
apache-2.0
10,317
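A minimal sketch of wiring the SQLAlchemyUserDatastore above into an application; the Flask app config, model fields, and the role/user values are illustrative assumptions, not part of the module:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_security.datastore import SQLAlchemyUserDatastore

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = SQLAlchemy(app)

# Association table for the many-to-many user/role relation.
roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer, db.ForeignKey('role.id')))

class Role(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    active = db.Column(db.Boolean)
    roles = db.relationship('Role', secondary=roles_users, backref='users')

datastore = SQLAlchemyUserDatastore(db, User, Role)

with app.app_context():
    db.create_all()
    admin = datastore.find_or_create_role('admin')   # illustrative role name
    user = datastore.create_user(email='[email protected]')  # illustrative user
    datastore.add_role_to_user(user, admin)
    datastore.commit()  # flushes the session via SQLAlchemyDatastore.commit()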
import re
import types

import numpy as np

from numba.cuda.testing import unittest, skip_on_cudasim, CUDATestCase
from numba import cuda, jit, int32
from numba.core.errors import TypingError


class TestDeviceFunc(CUDATestCase):

    def test_use_add2f(self):

        @cuda.jit("float32(float32, float32)", device=True)
        def add2f(a, b):
            return a + b

        def use_add2f(ary):
            i = cuda.grid(1)
            ary[i] = add2f(ary[i], ary[i])

        compiled = cuda.jit("void(float32[:])")(use_add2f)

        nelem = 10
        ary = np.arange(nelem, dtype=np.float32)
        exp = ary + ary
        compiled[1, nelem](ary)

        self.assertTrue(np.all(ary == exp), (ary, exp))

    def test_indirect_add2f(self):

        @cuda.jit("float32(float32, float32)", device=True)
        def add2f(a, b):
            return a + b

        @cuda.jit("float32(float32, float32)", device=True)
        def indirect(a, b):
            return add2f(a, b)

        def indirect_add2f(ary):
            i = cuda.grid(1)
            ary[i] = indirect(ary[i], ary[i])

        compiled = cuda.jit("void(float32[:])")(indirect_add2f)

        nelem = 10
        ary = np.arange(nelem, dtype=np.float32)
        exp = ary + ary
        compiled[1, nelem](ary)

        self.assertTrue(np.all(ary == exp), (ary, exp))

    def _check_cpu_dispatcher(self, add):
        @cuda.jit
        def add_kernel(ary):
            i = cuda.grid(1)
            ary[i] = add(ary[i], 1)

        ary = np.arange(10)
        expect = ary + 1
        add_kernel[1, ary.size](ary)
        np.testing.assert_equal(expect, ary)

    def test_cpu_dispatcher(self):
        # Test correct usage
        @jit
        def add(a, b):
            return a + b

        self._check_cpu_dispatcher(add)

    @skip_on_cudasim('not supported in cudasim')
    def test_cpu_dispatcher_invalid(self):
        # Test invalid usage
        # Explicit signature disables compilation, which also disables
        # compiling on CUDA.
        @jit('(i4, i4)')
        def add(a, b):
            return a + b

        # Check that the right error message is provided.
        with self.assertRaises(TypingError) as raises:
            self._check_cpu_dispatcher(add)
        msg = "Untyped global name 'add':.*using cpu function on device"
        expected = re.compile(msg)
        self.assertTrue(expected.search(str(raises.exception)) is not None)

    def test_cpu_dispatcher_other_module(self):
        @jit
        def add(a, b):
            return a + b

        mymod = types.ModuleType(name='mymod')
        mymod.add = add
        del add

        @cuda.jit
        def add_kernel(ary):
            i = cuda.grid(1)
            ary[i] = mymod.add(ary[i], 1)

        ary = np.arange(10)
        expect = ary + 1
        add_kernel[1, ary.size](ary)
        np.testing.assert_equal(expect, ary)

    @skip_on_cudasim('not supported in cudasim')
    def test_inspect_ptx(self):
        @cuda.jit(device=True)
        def foo(x, y):
            return x + y

        args = (int32, int32)
        cres = foo.compile(args)
        fname = cres.fndesc.mangled_name
        # Verify that the function name has "foo" in it as in the python name
        self.assertIn('foo', fname)

        ptx = foo.inspect_ptx(args)
        # Check that the compiled function name is in the PTX.
        self.assertIn(fname, ptx.decode('ascii'))

    @skip_on_cudasim('not supported in cudasim')
    def test_inspect_llvm(self):
        @cuda.jit(device=True)
        def foo(x, y):
            return x + y

        args = (int32, int32)
        cres = foo.compile(args)
        fname = cres.fndesc.mangled_name
        # Verify that the function name has "foo" in it as in the python name
        self.assertIn('foo', fname)

        llvm = foo.inspect_llvm(args)
        # Check that the compiled function name is in the LLVM.
        self.assertIn(fname, llvm)


if __name__ == '__main__':
    unittest.main()
stonebig/numba
numba/cuda/tests/cudapy/test_device_func.py
Python
bsd-2-clause
3,985
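The tests above revolve around calling a @cuda.jit(device=True) function from a kernel. A minimal sketch of that pattern outside the test harness, runnable on a CUDA GPU (or with the simulator by setting NUMBA_ENABLE_CUDASIM=1); the function and array names are illustrative:

import numpy as np
from numba import cuda

@cuda.jit(device=True)
def add2f(a, b):
    # Device functions can only be called from kernels or other device code.
    return a + b

@cuda.jit
def double_kernel(ary):
    i = cuda.grid(1)
    if i < ary.size:  # guard against extra threads in the block
        ary[i] = add2f(ary[i], ary[i])

ary = np.arange(10, dtype=np.float32)
double_kernel[1, 32](ary)  # one block of 32 threads; the array is copied in and out
print(ary)  # each element doubled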
""" Test Scipy functions versus mpmath, if available. """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_, assert_allclose from numpy import pi import pytest import itertools from distutils.version import LooseVersion import scipy.special as sc from scipy._lib.six import with_metaclass from scipy.special._testutils import ( MissingModule, check_version, FuncData, assert_func_equal) from scipy.special._mptestutils import ( Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal, nonfunctional_tooslow, trace_args, time_limited, exception_to_nan, inf_to_nan) from scipy.special._ufuncs import ( _sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx, _igam_fac) try: import mpmath except ImportError: mpmath = MissingModule('mpmath') _is_32bit_platform = np.intp(0).itemsize < 8 # ------------------------------------------------------------------------------ # expi # ------------------------------------------------------------------------------ @check_version(mpmath, '0.10') def test_expi_complex(): dataset = [] for r in np.logspace(-99, 2, 10): for p in np.linspace(0, 2*np.pi, 30): z = r*np.exp(1j*p) dataset.append((z, complex(mpmath.ei(z)))) dataset = np.array(dataset, dtype=np.complex_) FuncData(sc.expi, dataset, 0, 1).check() # ------------------------------------------------------------------------------ # expn # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') def test_expn_large_n(): # Test the transition to the asymptotic regime of n. dataset = [] for n in [50, 51]: for x in np.logspace(0, 4, 200): with mpmath.workdps(100): dataset.append((n, x, float(mpmath.expint(n, x)))) dataset = np.asarray(dataset) FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check() # ------------------------------------------------------------------------------ # hyp0f1 # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') def test_hyp0f1_gh5764(): # Do a small and somewhat systematic test that runs quickly dataset = [] axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5] for v in axis: for x in axis: for y in axis: z = x + 1j*y # mpmath computes the answer correctly at dps ~ 17 but # fails for 20 < dps < 120 (uses a different method); # set the dps high enough that this isn't an issue with mpmath.workdps(120): res = complex(mpmath.hyp0f1(v, z)) dataset.append((v, z, res)) dataset = np.array(dataset) FuncData(lambda v, z: sc.hyp0f1(v.real, z), dataset, (0, 1), 2, rtol=1e-13).check() @check_version(mpmath, '0.19') def test_hyp0f1_gh_1609(): # this is a regression test for gh-1609 vv = np.linspace(150, 180, 21) af = sc.hyp0f1(vv, 0.5) mf = np.array([mpmath.hyp0f1(v, 0.5) for v in vv]) assert_allclose(af, mf.astype(float), rtol=1e-12) # ------------------------------------------------------------------------------ # hyp2f1 # ------------------------------------------------------------------------------ @check_version(mpmath, '1.0.0') def test_hyp2f1_strange_points(): pts = [ (2, -1, -1, 0.7), # expected: 2.4 (2, -2, -2, 0.7), # expected: 3.87 ] pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4)) pts = [ (a, b, c, x) for a, b, c, x in pts if b == c and round(b) == b and b < 0 and b != -1000 ] kw = dict(eliminate=True) dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() @check_version(mpmath, '0.13') def 
test_hyp2f1_real_some_points(): pts = [ (1, 2, 3, 0), (1./3, 2./3, 5./6, 27./32), (1./4, 1./2, 3./4, 80./81), (2,-2, -3, 3), (2, -3, -2, 3), (2, -1.5, -1.5, 3), (1, 2, 3, 0), (0.7235, -1, -5, 0.3), (0.25, 1./3, 2, 0.999), (0.25, 1./3, 2, -1), (2, 3, 5, 0.99), (3./2, -0.5, 3, 0.99), (2, 2.5, -3.25, 0.999), (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001), (-10, 900, -10.5, 0.99), (-10, 900, 10.5, 0.99), (-1, 2, 1, 1.0), (-1, 2, 1, -1.0), (-3, 13, 5, 1.0), (-3, 13, 5, -1.0), (0.5, 1 - 270.5, 1.5, 0.999**2), # from issue 1561 ] dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) olderr = np.seterr(invalid='ignore') try: FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() finally: np.seterr(**olderr) @check_version(mpmath, '0.14') def test_hyp2f1_some_points_2(): # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but # was fixed in their SVN since then pts = [ (112, (51,10), (-9,10), -0.99999), (10,-900,10.5,0.99), (10,-900,-10.5,0.99), ] def fev(x): if isinstance(x, tuple): return float(x[0]) / x[1] else: return x dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() @check_version(mpmath, '0.13') def test_hyp2f1_real_some(): dataset = [] for a in [-10, -5, -1.8, 1.8, 5, 10]: for b in [-2.5, -1, 1, 7.4]: for c in [-9, -1.8, 5, 20.4]: for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]: try: v = float(mpmath.hyp2f1(a, b, c, z)) except Exception: continue dataset.append((a, b, c, z, v)) dataset = np.array(dataset, dtype=np.float_) olderr = np.seterr(invalid='ignore') try: FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9, ignore_inf_sign=True).check() finally: np.seterr(**olderr) @check_version(mpmath, '0.12') @pytest.mark.slow def test_hyp2f1_real_random(): npoints = 500 dataset = np.zeros((npoints, 5), np.float_) np.random.seed(1234) dataset[:, 0] = np.random.pareto(1.5, npoints) dataset[:, 1] = np.random.pareto(1.5, npoints) dataset[:, 2] = np.random.pareto(1.5, npoints) dataset[:, 3] = 2*np.random.rand(npoints) - 1 dataset[:, 0] *= (-1)**np.random.randint(2, npoints) dataset[:, 1] *= (-1)**np.random.randint(2, npoints) dataset[:, 2] *= (-1)**np.random.randint(2, npoints) for ds in dataset: if mpmath.__version__ < '0.14': # mpmath < 0.14 fails for c too much smaller than a, b if abs(ds[:2]).max() > abs(ds[2]): ds[2] = abs(ds[:2]).max() ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4]))) FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check() # ------------------------------------------------------------------------------ # erf (complex) # ------------------------------------------------------------------------------ @check_version(mpmath, '0.14') def test_erf_complex(): # need to increase mpmath precision for this test old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: mpmath.mp.dps = 70 x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11)) x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11)) points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()] assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points, vectorized=False, rtol=1e-13) assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points, vectorized=False, rtol=1e-13) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec # ------------------------------------------------------------------------------ # lpmv # 
------------------------------------------------------------------------------ @check_version(mpmath, '0.15') def test_lpmv(): pts = [] for x in [-0.99, -0.557, 1e-6, 0.132, 1]: pts.extend([ (1, 1, x), (1, -1, x), (-1, 1, x), (-1, -2, x), (1, 1.7, x), (1, -1.7, x), (-1, 1.7, x), (-1, -2.7, x), (1, 10, x), (1, 11, x), (3, 8, x), (5, 11, x), (-3, 8, x), (-5, 11, x), (3, -8, x), (5, -11, x), (-3, -8, x), (-5, -11, x), (3, 8.3, x), (5, 11.3, x), (-3, 8.3, x), (-5, 11.3, x), (3, -8.3, x), (5, -11.3, x), (-3, -8.3, x), (-5, -11.3, x), ]) def mplegenp(nu, mu, x): if mu == int(mu) and x == 1: # mpmath 0.17 gets this wrong if mu == 0: return 1 else: return 0 return mpmath.legenp(nu, mu, x) dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts] dataset = np.array(dataset, dtype=np.float_) def evf(mu, nu, x): return sc.lpmv(mu.astype(int), nu, x) olderr = np.seterr(invalid='ignore') try: FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check() finally: np.seterr(**olderr) # ------------------------------------------------------------------------------ # beta # ------------------------------------------------------------------------------ @check_version(mpmath, '0.15') def test_beta(): np.random.seed(1234) b = np.r_[np.logspace(-200, 200, 4), np.logspace(-10, 10, 4), np.logspace(-1, 1, 4), np.arange(-10, 11, 1), np.arange(-10, 11, 1) + 0.5, -1, -2.3, -3, -100.3, -10003.4] a = b ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: mpmath.mp.dps = 400 assert_func_equal(sc.beta, lambda a, b: float(mpmath.beta(a, b)), ab, vectorized=False, rtol=1e-10, ignore_inf_sign=True) assert_func_equal( sc.betaln, lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))), ab, vectorized=False, rtol=1e-10) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec # ------------------------------------------------------------------------------ # loggamma # ------------------------------------------------------------------------------ LOGGAMMA_TAYLOR_RADIUS = 0.2 @check_version(mpmath, '0.19') def test_loggamma_taylor_transition(): # Make sure there isn't a big jump in accuracy when we move from # using the Taylor series to using the recurrence relation. r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1]) theta = np.linspace(0, 2*np.pi, 20) r, theta = np.meshgrid(r, theta) dz = r*np.exp(1j*theta) z = np.r_[1 + dz, 2 + dz].flatten() dataset = [] for z0 in z: dataset.append((z0, complex(mpmath.loggamma(z0)))) dataset = np.array(dataset) FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() @check_version(mpmath, '0.19') def test_loggamma_taylor(): # Test around the zeros at z = 1, 2. r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10) theta = np.linspace(0, 2*np.pi, 20) r, theta = np.meshgrid(r, theta) dz = r*np.exp(1j*theta) z = np.r_[1 + dz, 2 + dz].flatten() dataset = [] for z0 in z: dataset.append((z0, complex(mpmath.loggamma(z0)))) dataset = np.array(dataset) FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() # ------------------------------------------------------------------------------ # rgamma # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') @pytest.mark.slow def test_rgamma_zeros(): # Test around the zeros at z = 0, -1, -2, ..., -169. (After -169 we # get values that are out of floating point range even when we're # within 0.1 of the zero.) # Can't use too many points here or the test takes forever. 
dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)] dy = dx.copy() dx, dy = np.meshgrid(dx, dy) dz = dx + 1j*dy zeros = np.arange(0, -170, -1).reshape(1, 1, -1) z = (zeros + np.dstack((dz,)*zeros.size)).flatten() dataset = [] with mpmath.workdps(100): for z0 in z: dataset.append((z0, complex(mpmath.rgamma(z0)))) dataset = np.array(dataset) FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check() # ------------------------------------------------------------------------------ # digamma # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') @pytest.mark.slow def test_digamma_roots(): # Test the special-cased roots for digamma. root = mpmath.findroot(mpmath.digamma, 1.5) roots = [float(root)] root = mpmath.findroot(mpmath.digamma, -0.5) roots.append(float(root)) roots = np.array(roots) # If we test beyond a radius of 0.24 mpmath will take forever. dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24] dy = dx.copy() dx, dy = np.meshgrid(dx, dy) dz = dx + 1j*dy z = (roots + np.dstack((dz,)*roots.size)).flatten() dataset = [] with mpmath.workdps(30): for z0 in z: dataset.append((z0, complex(mpmath.digamma(z0)))) dataset = np.array(dataset) FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() @check_version(mpmath, '0.19') def test_digamma_negreal(): # Test digamma around the negative real axis. Don't do this in # TestSystematic because the points need some jiggering so that # mpmath doesn't take forever. digamma = exception_to_nan(mpmath.digamma) x = -np.logspace(300, -30, 100) y = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)] x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] with mpmath.workdps(40): for z0 in z: res = digamma(z0) dataset.append((z0, complex(res))) dataset = np.asarray(dataset) FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() @check_version(mpmath, '0.19') def test_digamma_boundary(): # Check that there isn't a jump in accuracy when we switch from # using the asymptotic series to the reflection formula. x = -np.logspace(300, -30, 100) y = np.array([-6.1, -5.9, 5.9, 6.1]) x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] with mpmath.workdps(30): for z0 in z: res = mpmath.digamma(z0) dataset.append((z0, complex(res))) dataset = np.asarray(dataset) FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() # ------------------------------------------------------------------------------ # gammainc # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') @pytest.mark.slow def test_gammainc_boundary(): # Test the transition to the asymptotic series. small = 20 a = np.linspace(0.5*small, 2*small, 50) x = a.copy() a, x = np.meshgrid(a, x) a, x = a.flatten(), x.flatten() dataset = [] with mpmath.workdps(100): for a0, x0 in zip(a, x): dataset.append((a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True)))) dataset = np.array(dataset) FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check() # ------------------------------------------------------------------------------ # spence # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') @pytest.mark.slow def test_spence_circle(): # The trickiest region for spence is around the circle |z - 1| = 1, # so test that region carefully. 
def spence(z): return complex(mpmath.polylog(2, 1 - z)) r = np.linspace(0.5, 1.5) theta = np.linspace(0, 2*pi) z = (1 + np.outer(r, np.exp(1j*theta))).flatten() dataset = [] for z0 in z: dataset.append((z0, spence(z0))) dataset = np.array(dataset) FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check() # ------------------------------------------------------------------------------ # sinpi and cospi # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') def test_sinpi_zeros(): eps = np.finfo(float).eps dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] dy = dx.copy() dx, dy = np.meshgrid(dx, dy) dz = dx + 1j*dy zeros = np.arange(-100, 100, 1).reshape(1, 1, -1) z = (zeros + np.dstack((dz,)*zeros.size)).flatten() dataset = [] for z0 in z: dataset.append((z0, complex(mpmath.sinpi(z0)))) dataset = np.array(dataset) FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check() @check_version(mpmath, '0.19') def test_cospi_zeros(): eps = np.finfo(float).eps dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] dy = dx.copy() dx, dy = np.meshgrid(dx, dy) dz = dx + 1j*dy zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1) z = (zeros + np.dstack((dz,)*zeros.size)).flatten() dataset = [] for z0 in z: dataset.append((z0, complex(mpmath.cospi(z0)))) dataset = np.array(dataset) FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check() # ------------------------------------------------------------------------------ # ellipj # ------------------------------------------------------------------------------ @check_version(mpmath, '0.19') def test_dn_quarter_period(): def dn(u, m): return sc.ellipj(u, m)[2] def mpmath_dn(u, m): return float(mpmath.ellipfun("dn", u=u, m=m)) m = np.linspace(0, 1, 20) du = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)] dataset = [] for m0 in m: u0 = float(mpmath.ellipk(m0)) for du0 in du: p = u0 + du0 dataset.append((p, m0, mpmath_dn(p, m0))) dataset = np.asarray(dataset) FuncData(dn, dataset, (0, 1), 2, rtol=1e-10).check() # ------------------------------------------------------------------------------ # Wright Omega # ------------------------------------------------------------------------------ def _mpmath_wrightomega(z, dps): with mpmath.workdps(dps): z = mpmath.mpc(z) unwind = mpmath.ceil((z.imag - mpmath.pi)/(2*mpmath.pi)) res = mpmath.lambertw(mpmath.exp(z), unwind) return res @pytest.mark.slow @check_version(mpmath, '0.19') def test_wrightomega_branch(): x = -np.logspace(10, 0, 25) picut_above = [np.nextafter(np.pi, np.inf)] picut_below = [np.nextafter(np.pi, -np.inf)] npicut_above = [np.nextafter(-np.pi, np.inf)] npicut_below = [np.nextafter(-np.pi, -np.inf)] for i in range(50): picut_above.append(np.nextafter(picut_above[-1], np.inf)) picut_below.append(np.nextafter(picut_below[-1], -np.inf)) npicut_above.append(np.nextafter(npicut_above[-1], np.inf)) npicut_below.append(np.nextafter(npicut_below[-1], -np.inf)) y = np.hstack((picut_above, picut_below, npicut_above, npicut_below)) x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] for z0 in z: dataset.append((z0, complex(_mpmath_wrightomega(z0, 25)))) dataset = np.asarray(dataset) FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check() @pytest.mark.slow @check_version(mpmath, '0.19') def test_wrightomega_region1(): # This region gets less coverage in the TestSystematic test x = np.linspace(-2, 1) y = np.linspace(1, 2*np.pi) x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] for z0 in z: dataset.append((z0, 
complex(_mpmath_wrightomega(z0, 25)))) dataset = np.asarray(dataset) FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() @pytest.mark.slow @check_version(mpmath, '0.19') def test_wrightomega_region2(): # This region gets less coverage in the TestSystematic test x = np.linspace(-2, 1) y = np.linspace(-2*np.pi, -1) x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] for z0 in z: dataset.append((z0, complex(_mpmath_wrightomega(z0, 25)))) dataset = np.asarray(dataset) FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() # ------------------------------------------------------------------------------ # lambertw # ------------------------------------------------------------------------------ @pytest.mark.slow @check_version(mpmath, '0.19') def test_lambertw_smallz(): x, y = np.linspace(-1, 1, 25), np.linspace(-1, 1, 25) x, y = np.meshgrid(x, y) z = (x + 1j*y).flatten() dataset = [] for z0 in z: dataset.append((z0, complex(mpmath.lambertw(z0)))) dataset = np.asarray(dataset) FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check() # ------------------------------------------------------------------------------ # Systematic tests # ------------------------------------------------------------------------------ HYPERKW = dict(maxprec=200, maxterms=200) @pytest.mark.slow @check_version(mpmath, '0.17') class TestSystematic(object): def test_airyai(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [Arg(-1e3, 1e3)]) def test_airyai_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [ComplexArg()]) def test_airyai_prime(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [Arg(-1e3, 1e3)]) def test_airyai_prime_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [ComplexArg()]) def test_airybi(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [Arg(-1e3, 1e3)]) def test_airybi_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [ComplexArg()]) def test_airybi_prime(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [Arg(-1e3, 1e3)]) def test_airybi_prime_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [ComplexArg()]) def test_bei(self): assert_mpmath_equal(sc.bei, exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)), [Arg(-1e3, 1e3)]) def test_ber(self): assert_mpmath_equal(sc.ber, exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)), [Arg(-1e3, 1e3)]) def test_bernoulli(self): assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)], lambda n: float(mpmath.bernoulli(int(n))), [IntArg(0, 13000)], rtol=1e-9, n=13000) def test_besseli(self): assert_mpmath_equal(sc.iv, exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg()], atol=1e-270) def test_besseli_complex(self): 
assert_mpmath_equal(lambda v, z: sc.iv(v.real, z), exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), [Arg(-1e100, 1e100), ComplexArg()]) def test_besselj(self): assert_mpmath_equal(sc.jv, exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg(-1e3, 1e3)], ignore_inf_sign=True) # loss of precision at large arguments due to oscillation assert_mpmath_equal(sc.jv, exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], ignore_inf_sign=True, rtol=1e-5) def test_besselj_complex(self): assert_mpmath_equal(lambda v, z: sc.jv(v.real, z), exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(), ComplexArg()]) def test_besselk(self): assert_mpmath_equal(sc.kv, mpmath.besselk, [Arg(-200, 200), Arg(0, np.inf)], nan_ok=False, rtol=1e-12) def test_besselk_int(self): assert_mpmath_equal(sc.kn, mpmath.besselk, [IntArg(-200, 200), Arg(0, np.inf)], nan_ok=False, rtol=1e-12) def test_besselk_complex(self): assert_mpmath_equal(lambda v, z: sc.kv(v.real, z), exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)), [Arg(-1e100, 1e100), ComplexArg()]) def test_bessely(self): def mpbessely(v, x): r = float(mpmath.bessely(v, x, **HYPERKW)) if abs(r) > 1e305: # overflowing to inf a bit earlier is OK r = np.inf * np.sign(r) if abs(r) == 0 and x == 0: # invalid result from mpmath, point x=0 is a divergence return np.nan return r assert_mpmath_equal(sc.yv, exception_to_nan(mpbessely), [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], n=5000) def test_bessely_complex(self): def mpbessely(v, x): r = complex(mpmath.bessely(v, x, **HYPERKW)) if abs(r) > 1e305: # overflowing to inf a bit earlier is OK olderr = np.seterr(invalid='ignore') try: r = np.inf * np.sign(r) finally: np.seterr(**olderr) return r assert_mpmath_equal(lambda v, z: sc.yv(v.real, z), exception_to_nan(mpbessely), [Arg(), ComplexArg()], n=15000) def test_bessely_int(self): def mpbessely(v, x): r = float(mpmath.bessely(v, x)) if abs(r) == 0 and x == 0: # invalid result from mpmath, point x=0 is a divergence return np.nan return r assert_mpmath_equal(lambda v, z: sc.yn(int(v), z), exception_to_nan(mpbessely), [IntArg(-1000, 1000), Arg(-1e8, 1e8)]) def test_beta(self): bad_points = [] def beta(a, b, nonzero=False): if a < -1e12 or b < -1e12: # Function is defined here only at integers, but due # to loss of precision this is numerically # ill-defined. Don't compare values here. return np.nan if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0: # close to a zero of the function: mpmath and scipy # will not round here the same, so the test needs to be # run with an absolute tolerance if nonzero: bad_points.append((float(a), float(b))) return np.nan return mpmath.beta(a, b) assert_mpmath_equal(sc.beta, lambda a, b: beta(a, b, nonzero=True), [Arg(), Arg()], dps=400, ignore_inf_sign=True) assert_mpmath_equal(sc.beta, beta, np.array(bad_points), dps=400, ignore_inf_sign=True, atol=1e-11) def test_betainc(self): assert_mpmath_equal(sc.betainc, time_limited()(exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))), [Arg(), Arg(), Arg()]) def test_binom(self): bad_points = [] def binomial(n, k, nonzero=False): if abs(k) > 1e8*(abs(n) + 1): # The binomial is rapidly oscillating in this region, # and the function is numerically ill-defined. Don't # compare values here. 
return np.nan if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15: # close to a zero of the function: mpmath and scipy # will not round here the same, so the test needs to be # run with an absolute tolerance if nonzero: bad_points.append((float(n), float(k))) return np.nan return mpmath.binomial(n, k) assert_mpmath_equal(sc.binom, lambda n, k: binomial(n, k, nonzero=True), [Arg(), Arg()], dps=400) assert_mpmath_equal(sc.binom, binomial, np.array(bad_points), dps=400, atol=1e-14) def test_chebyt_int(self): assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x), exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)), [IntArg(), Arg()], dps=50) @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") def test_chebyt(self): assert_mpmath_equal(sc.eval_chebyt, lambda n, x: time_limited()(exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW), [Arg(-101, 101), Arg()], n=10000) def test_chebyu_int(self): assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x), exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)), [IntArg(), Arg()], dps=50) @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") def test_chebyu(self): assert_mpmath_equal(sc.eval_chebyu, lambda n, x: time_limited()(exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW), [Arg(-101, 101), Arg()]) def test_chi(self): def chi(x): return sc.shichi(x)[1] assert_mpmath_equal(chi, mpmath.chi, [Arg()]) # check asymptotic series cross-over assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) def test_chi_complex(self): def chi(z): return sc.shichi(z)[1] # chi oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(chi, mpmath.chi, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-12) def test_ci(self): def ci(x): return sc.sici(x)[1] # oscillating function: limit range assert_mpmath_equal(ci, mpmath.ci, [Arg(-1e8, 1e8)]) def test_ci_complex(self): def ci(z): return sc.sici(z)[1] # ci oscillates as Re[z] -> +- inf, so limit range assert_mpmath_equal(ci, mpmath.ci, [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], rtol=1e-8) def test_cospi(self): eps = np.finfo(float).eps assert_mpmath_equal(_cospi, mpmath.cospi, [Arg()], nan_ok=False, rtol=eps) def test_cospi_complex(self): assert_mpmath_equal(_cospi, mpmath.cospi, [ComplexArg()], nan_ok=False, rtol=1e-13) def test_digamma(self): assert_mpmath_equal(sc.digamma, exception_to_nan(mpmath.digamma), [Arg()], rtol=1e-12, dps=50) def test_digamma_complex(self): # Test on a cut plane because mpmath will hang. See # test_digamma_negreal for tests on the negative real axis. 
def param_filter(z): return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True) assert_mpmath_equal(sc.digamma, exception_to_nan(mpmath.digamma), [ComplexArg()], rtol=1e-13, dps=40, param_filter=param_filter) def test_e1(self): assert_mpmath_equal(sc.exp1, mpmath.e1, [Arg()], rtol=1e-14) def test_e1_complex(self): # E_1 oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(sc.exp1, mpmath.e1, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-11) # Check cross-over region assert_mpmath_equal(sc.exp1, mpmath.e1, (np.linspace(-50, 50, 171)[:, None] + np.r_[0, np.logspace(-3, 2, 61), -np.logspace(-3, 2, 11)]*1j).ravel(), rtol=1e-11) assert_mpmath_equal(sc.exp1, mpmath.e1, (np.linspace(-50, -35, 10000) + 0j), rtol=1e-11) def test_exprel(self): assert_mpmath_equal(sc.exprel, lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), [Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))]) assert_mpmath_equal(sc.exprel, lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11) assert_(np.isinf(sc.exprel(np.inf))) assert_(sc.exprel(-np.inf) == 0) def test_expm1_complex(self): # Oscillates as a function of Im[z], so limit range to avoid loss of precision assert_mpmath_equal(sc.expm1, mpmath.expm1, [ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))]) def test_log1p_complex(self): assert_mpmath_equal(sc.log1p, lambda x: mpmath.log(x+1), [ComplexArg()], dps=60) def test_log1pmx(self): assert_mpmath_equal(_log1pmx, lambda x: mpmath.log(x + 1) - x, [Arg()], dps=60, rtol=1e-14) def test_ei(self): assert_mpmath_equal(sc.expi, mpmath.ei, [Arg()], rtol=1e-11) def test_ei_complex(self): # Ei oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(sc.expi, mpmath.ei, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-9) def test_ellipe(self): assert_mpmath_equal(sc.ellipe, mpmath.ellipe, [Arg(b=1.0)]) def test_ellipeinc(self): assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(-1e3, 1e3), Arg(b=1.0)]) def test_ellipeinc_largephi(self): assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(), Arg()]) def test_ellipf(self): assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(-1e3, 1e3), Arg()]) def test_ellipf_largephi(self): assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(), Arg()]) def test_ellipk(self): assert_mpmath_equal(sc.ellipk, mpmath.ellipk, [Arg(b=1.0)]) assert_mpmath_equal(sc.ellipkm1, lambda m: mpmath.ellipk(1 - m), [Arg(a=0.0)], dps=400) def test_ellipkinc(self): def ellipkinc(phi, m): return mpmath.ellippi(0, phi, m) assert_mpmath_equal(sc.ellipkinc, ellipkinc, [Arg(-1e3, 1e3), Arg(b=1.0)], ignore_inf_sign=True) def test_ellipkinc_largephi(self): def ellipkinc(phi, m): return mpmath.ellippi(0, phi, m) assert_mpmath_equal(sc.ellipkinc, ellipkinc, [Arg(), Arg(b=1.0)], ignore_inf_sign=True) def test_ellipfun_sn(self): def sn(u, m): # mpmath doesn't get the zero at u = 0--fix that if u == 0: return 0 else: return mpmath.ellipfun("sn", u=u, m=m) # Oscillating function --- limit range of first argument; the # loss of precision there is an expected numerical feature # rather than an actual bug assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0], sn, [Arg(-1e6, 1e6), Arg(a=0, b=1)], rtol=1e-8) def test_ellipfun_cn(self): # see comment in ellipfun_sn assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1], lambda u, m: mpmath.ellipfun("cn", u=u, m=m), [Arg(-1e6, 1e6), Arg(a=0, b=1)], rtol=1e-8) def test_ellipfun_dn(self): # see comment in 
ellipfun_sn assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2], lambda u, m: mpmath.ellipfun("dn", u=u, m=m), [Arg(-1e6, 1e6), Arg(a=0, b=1)], rtol=1e-8) def test_erf(self): assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [Arg()]) def test_erf_complex(self): assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [ComplexArg()], n=200) def test_erfc(self): assert_mpmath_equal(sc.erfc, exception_to_nan(lambda z: mpmath.erfc(z)), [Arg()], rtol=1e-13) def test_erfc_complex(self): assert_mpmath_equal(sc.erfc, exception_to_nan(lambda z: mpmath.erfc(z)), [ComplexArg()], n=200) def test_erfi(self): assert_mpmath_equal(sc.erfi, mpmath.erfi, [Arg()], n=200) def test_erfi_complex(self): assert_mpmath_equal(sc.erfi, mpmath.erfi, [ComplexArg()], n=200) def test_ndtr(self): assert_mpmath_equal(sc.ndtr, exception_to_nan(lambda z: mpmath.ncdf(z)), [Arg()], n=200) def test_ndtr_complex(self): assert_mpmath_equal(sc.ndtr, lambda z: mpmath.erfc(-z/np.sqrt(2.))/2., [ComplexArg(a=complex(-10000, -10000), b=complex(10000, 10000))], n=400) def test_log_ndtr(self): assert_mpmath_equal(sc.log_ndtr, exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))), [Arg()], n=600, dps=300) def test_log_ndtr_complex(self): assert_mpmath_equal(sc.log_ndtr, exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)), [ComplexArg(a=complex(-10000, -100), b=complex(10000, 100))], n=200, dps=300) def test_eulernum(self): assert_mpmath_equal(lambda n: sc.euler(n)[-1], mpmath.eulernum, [IntArg(1, 10000)], n=10000) def test_expint(self): assert_mpmath_equal(sc.expn, mpmath.expint, [IntArg(0, 200), Arg(0, np.inf)], rtol=1e-13, dps=160) def test_fresnels(self): def fresnels(x): return sc.fresnel(x)[0] assert_mpmath_equal(fresnels, mpmath.fresnels, [Arg()]) def test_fresnelc(self): def fresnelc(x): return sc.fresnel(x)[1] assert_mpmath_equal(fresnelc, mpmath.fresnelc, [Arg()]) def test_gamma(self): assert_mpmath_equal(sc.gamma, exception_to_nan(mpmath.gamma), [Arg()]) def test_gamma_complex(self): assert_mpmath_equal(sc.gamma, exception_to_nan(mpmath.gamma), [ComplexArg()], rtol=5e-13) def test_gammainc(self): # Larger arguments are tested in test_data.py:test_local assert_mpmath_equal(sc.gammainc, lambda z, b: mpmath.gammainc(z, b=b, regularized=True), [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], nan_ok=False, rtol=1e-11) def test_gammaincc(self): # Larger arguments are tested in test_data.py:test_local assert_mpmath_equal(sc.gammaincc, lambda z, a: mpmath.gammainc(z, a=a, regularized=True), [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], nan_ok=False, rtol=1e-11) def test_gammaln(self): # The real part of loggamma is log(|gamma(z)|). 
def f(z): return mpmath.loggamma(z).real assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()]) @pytest.mark.xfail(run=False) def test_gegenbauer(self): assert_mpmath_equal(sc.eval_gegenbauer, exception_to_nan(mpmath.gegenbauer), [Arg(-1e3, 1e3), Arg(), Arg()]) def test_gegenbauer_int(self): # Redefine functions to deal with numerical + mpmath issues def gegenbauer(n, a, x): # Avoid overflow at large `a` (mpmath would need an even larger # dps to handle this correctly, so just skip this region) if abs(a) > 1e100: return np.nan # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't do these # always correctly if n == 0: r = 1.0 elif n == 1: r = 2*a*x else: r = mpmath.gegenbauer(n, a, x) # Mpmath 0.17 gives wrong results (spurious zero) in some cases, so # compute the value by perturbing the result if float(r) == 0 and a < -1 and float(a) == int(float(a)): r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x) if abs(r) < mpmath.mpf('1e-50'): r = mpmath.mpf('0.0') # Differing overflow thresholds in scipy vs. mpmath if abs(r) > 1e270: return np.inf return r def sc_gegenbauer(n, a, x): r = sc.eval_gegenbauer(int(n), a, x) # Differing overflow thresholds in scipy vs. mpmath if abs(r) > 1e270: return np.inf return r assert_mpmath_equal(sc_gegenbauer, exception_to_nan(gegenbauer), [IntArg(0, 100), Arg(-1e9, 1e9), Arg()], n=40000, dps=100, ignore_inf_sign=True, rtol=1e-6) # Check the small-x expansion assert_mpmath_equal(sc_gegenbauer, exception_to_nan(gegenbauer), [IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))], dps=100, ignore_inf_sign=True) @pytest.mark.xfail(run=False) def test_gegenbauer_complex(self): assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x), exception_to_nan(mpmath.gegenbauer), [IntArg(0, 100), Arg(), ComplexArg()]) @nonfunctional_tooslow def test_gegenbauer_complex_general(self): assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x), exception_to_nan(mpmath.gegenbauer), [Arg(-1e3, 1e3), Arg(), ComplexArg()]) def test_hankel1(self): assert_mpmath_equal(sc.hankel1, exception_to_nan(lambda v, x: mpmath.hankel1(v, x, **HYPERKW)), [Arg(-1e20, 1e20), Arg()]) def test_hankel2(self): assert_mpmath_equal(sc.hankel2, exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)), [Arg(-1e20, 1e20), Arg()]) @pytest.mark.xfail(run=False, reason="issues at intermediately large orders") def test_hermite(self): assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x), exception_to_nan(mpmath.hermite), [IntArg(0, 10000), Arg()]) # hurwitz: same as zeta def test_hyp0f1(self): # mpmath reports no convergence unless maxterms is large enough KW = dict(maxprec=400, maxterms=1500) # n=500 (non-xslow default) fails for one bad point assert_mpmath_equal(sc.hyp0f1, lambda a, x: mpmath.hyp0f1(a, x, **KW), [Arg(-1e7, 1e7), Arg(0, 1e5)], n=5000) # NB: The range of the second parameter ("z") is limited from below # because of an overflow in the intermediate calculations. The way # for fix it is to implement an asymptotic expansion for Bessel J # (similar to what is implemented for Bessel I here). def test_hyp0f1_complex(self): assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z), exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)), [Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))]) # NB: The range of the first parameter ("v") are limited by an overflow # in the intermediate calculations. Can be fixed by implementing an # asymptotic expansion for Bessel functions for large order. 
@pytest.mark.xfail(run=False) def test_hyp1f1(self): assert_mpmath_equal(inf_to_nan(sc.hyp1f1), exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)), [Arg(-1e5, 1e5), Arg(-1e5, 1e5), Arg()], n=2000) @pytest.mark.xfail(run=False) def test_hyp1f1_complex(self): assert_mpmath_equal(inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)), exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)), [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()], n=2000) @nonfunctional_tooslow def test_hyp2f1_complex(self): # Scipy's hyp2f1 seems to have performance and accuracy problems assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x), exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)), [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()], n=10) @pytest.mark.xfail(run=False) def test_hyperu(self): assert_mpmath_equal(sc.hyperu, exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)), [Arg(), Arg(), Arg()]) @pytest.mark.xfail(condition=_is_32bit_platform, reason="mpmath issue gh-342: unsupported operand mpz, long for pow") def test_igam_fac(self): def mp_igam_fac(a, x): return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a) assert_mpmath_equal(_igam_fac, mp_igam_fac, [Arg(0, 1e14, inclusive_a=False), Arg(0, 1e14)], rtol=1e-10) def test_j0(self): # The Bessel function at large arguments is j0(x) ~ cos(x + phi)/sqrt(x) # and at large arguments the phase of the cosine loses precision. # # This is numerically expected behavior, so we compare only up to # 1e8 = 1e15 * 1e-7 assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e3, 1e3)]) assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e8, 1e8)], rtol=1e-5) def test_j1(self): # See comment in test_j0 assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e3, 1e3)]) assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e8, 1e8)], rtol=1e-5) @pytest.mark.xfail(run=False) def test_jacobi(self): assert_mpmath_equal(sc.eval_jacobi, exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), [Arg(), Arg(), Arg(), Arg()]) assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x), exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), [IntArg(), Arg(), Arg(), Arg()]) def test_jacobi_int(self): # Redefine functions to deal with numerical + mpmath issues def jacobi(n, a, b, x): # Mpmath does not handle n=0 case always correctly if n == 0: return 1.0 return mpmath.jacobi(n, a, b, x) assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x), lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW), [IntArg(), Arg(), Arg(), Arg()], n=20000, dps=50) def test_kei(self): def kei(x): if x == 0: # work around mpmath issue at x=0 return -pi/4 return exception_to_nan(mpmath.kei)(0, x, **HYPERKW) assert_mpmath_equal(sc.kei, kei, [Arg(-1e30, 1e30)], n=1000) def test_ker(self): assert_mpmath_equal(sc.ker, exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)), [Arg(-1e30, 1e30)], n=1000) @nonfunctional_tooslow def test_laguerre(self): assert_mpmath_equal(trace_args(sc.eval_laguerre), lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), [Arg(), Arg()]) def test_laguerre_int(self): assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x), lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), [IntArg(), Arg()], n=20000) @pytest.mark.xfail(condition=_is_32bit_platform, reason="see gh-3551 for bad points") def test_lambertw_real(self): assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k.real)), lambda x, k: mpmath.lambertw(x, 
int(k.real)), [ComplexArg(-np.inf, np.inf), IntArg(0, 10)], rtol=1e-13, nan_ok=False) def test_lanczos_sum_expg_scaled(self): maxgamma = 171.624376956302725 e = np.exp(1) g = 6.024680040776729583740234375 def gamma(x): with np.errstate(over='ignore'): fac = ((x + g - 0.5)/e)**(x - 0.5) if fac != np.inf: res = fac*_lanczos_sum_expg_scaled(x) else: fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5)) res = fac*_lanczos_sum_expg_scaled(x) res *= fac return res assert_mpmath_equal(gamma, mpmath.gamma, [Arg(0, maxgamma, inclusive_a=False)], rtol=1e-13) @nonfunctional_tooslow def test_legendre(self): assert_mpmath_equal(sc.eval_legendre, mpmath.legendre, [Arg(), Arg()]) def test_legendre_int(self): assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), [IntArg(), Arg()], n=20000) # Check the small-x expansion assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), [IntArg(), FixedArg(np.logspace(-30, -4, 20))]) def test_legenp(self): def lpnm(n, m, z): try: v = sc.lpmn(m, n, z)[0][-1,-1] except ValueError: return np.nan if abs(v) > 1e306: # harmonize overflow to inf v = np.inf * np.sign(v.real) return v def lpnm_2(n, m, z): v = sc.lpmv(m, n, z) if abs(v) > 1e306: # harmonize overflow to inf v = np.inf * np.sign(v.real) return v def legenp(n, m, z): if (z == 1 or z == -1) and int(n) == n: # Special case (mpmath may give inf, we take the limit by # continuity) if m == 0: if n < 0: n = -n - 1 return mpmath.power(mpmath.sign(z), n) else: return 0 if abs(z) < 1e-15: # mpmath has bad performance here return np.nan typ = 2 if abs(z) < 1 else 3 v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ) if abs(v) > 1e306: # harmonize overflow to inf v = mpmath.inf * mpmath.sign(v.real) return v assert_mpmath_equal(lpnm, legenp, [IntArg(-100, 100), IntArg(-100, 100), Arg()]) assert_mpmath_equal(lpnm_2, legenp, [IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)], atol=1e-10) def test_legenp_complex_2(self): def clpnm(n, m, z): try: return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1] except ValueError: return np.nan def legenp(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2) # mpmath is quite slow here x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) y = np.array([-1e3, -0.5, 0.5, 1.3]) z = (x[:,None] + 1j*y[None,:]).ravel() assert_mpmath_equal(clpnm, legenp, [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], rtol=1e-6, n=500) def test_legenp_complex_3(self): def clpnm(n, m, z): try: return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1] except ValueError: return np.nan def legenp(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3) # mpmath is quite slow here x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) y = np.array([-1e3, -0.5, 0.5, 1.3]) z = (x[:,None] + 1j*y[None,:]).ravel() assert_mpmath_equal(clpnm, legenp, [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], rtol=1e-6, n=500) @pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1") def test_legenq(self): def lqnm(n, m, z): return sc.lqmn(m, n, z)[0][-1,-1] def legenq(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return exception_to_nan(mpmath.legenq)(n, m, z, type=2) 
assert_mpmath_equal(lqnm, legenq, [IntArg(0, 100), IntArg(0, 100), Arg()]) @nonfunctional_tooslow def test_legenq_complex(self): def lqnm(n, m, z): return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1] def legenq(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2) assert_mpmath_equal(lqnm, legenq, [IntArg(0, 100), IntArg(0, 100), ComplexArg()], n=100) def test_lgam1p(self): def param_filter(x): # Filter the poles return np.where((np.floor(x) == x) & (x <= 0), False, True) def mp_lgam1p(z): # The real part of loggamma is log(|gamma(z)|) return mpmath.loggamma(1 + z).real assert_mpmath_equal(_lgam1p, mp_lgam1p, [Arg()], rtol=1e-13, dps=100, param_filter=param_filter) def test_loggamma(self): def mpmath_loggamma(z): try: res = mpmath.loggamma(z) except ValueError: res = complex(np.nan, np.nan) return res assert_mpmath_equal(sc.loggamma, mpmath_loggamma, [ComplexArg()], nan_ok=False, distinguish_nan_and_inf=False, rtol=5e-14) @pytest.mark.xfail(run=False) def test_pcfd(self): def pcfd(v, x): return sc.pbdv(v, x)[0] assert_mpmath_equal(pcfd, exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)), [Arg(), Arg()]) @pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- maybe different definition?") def test_pcfv(self): def pcfv(v, x): return sc.pbvv(v, x)[0] assert_mpmath_equal(pcfv, lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW), [Arg(), Arg()], n=1000) def test_pcfw(self): def pcfw(a, x): return sc.pbwa(a, x)[0] def dpcfw(a, x): return sc.pbwa(a, x)[1] def mpmath_dpcfw(a, x): return mpmath.diff(mpmath.pcfw, (a, x), (0, 1)) # The Zhang and Jin implementation only uses Taylor series and # is thus accurate in only a very small range. 
assert_mpmath_equal(pcfw, mpmath.pcfw, [Arg(-5, 5), Arg(-5, 5)], rtol=2e-8, n=100) assert_mpmath_equal(dpcfw, mpmath_dpcfw, [Arg(-5, 5), Arg(-5, 5)], rtol=2e-9, n=100) @pytest.mark.xfail(run=False, reason="issues at large arguments (atol OK, rtol not) and <eps-close to z=0") def test_polygamma(self): assert_mpmath_equal(sc.polygamma, time_limited()(exception_to_nan(mpmath.polygamma)), [IntArg(0, 1000), Arg()]) def test_rgamma(self): def rgamma(x): if x < -8000: return np.inf else: v = mpmath.rgamma(x) return v # n=500 (non-xslow default) fails for one bad point assert_mpmath_equal(sc.rgamma, rgamma, [Arg()], n=5000, ignore_inf_sign=True) def test_rgamma_complex(self): assert_mpmath_equal(sc.rgamma, exception_to_nan(mpmath.rgamma), [ComplexArg()], rtol=5e-13) @pytest.mark.xfail(reason=("see gh-3551 for bad points on 32 bit " "systems and gh-8095 for another bad " "point")) def test_rf(self): if LooseVersion(mpmath.__version__) >= LooseVersion("1.0.0"): # no workarounds needed mppoch = mpmath.rf else: def mppoch(a, m): # deal with cases where the result in double precision # hits exactly a non-positive integer, but the # corresponding extended-precision mpf floats don't if float(a + m) == int(a + m) and float(a + m) <= 0: a = mpmath.mpf(a) m = int(a + m) - a return mpmath.rf(a, m) assert_mpmath_equal(sc.poch, mppoch, [Arg(), Arg()], dps=400) def test_sinpi(self): eps = np.finfo(float).eps assert_mpmath_equal(_sinpi, mpmath.sinpi, [Arg()], nan_ok=False, rtol=eps) def test_sinpi_complex(self): assert_mpmath_equal(_sinpi, mpmath.sinpi, [ComplexArg()], nan_ok=False, rtol=2e-14) def test_shi(self): def shi(x): return sc.shichi(x)[0] assert_mpmath_equal(shi, mpmath.shi, [Arg()]) # check asymptotic series cross-over assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) def test_shi_complex(self): def shi(z): return sc.shichi(z)[0] # shi oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(shi, mpmath.shi, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-12) def test_si(self): def si(x): return sc.sici(x)[0] assert_mpmath_equal(si, mpmath.si, [Arg()]) def test_si_complex(self): def si(z): return sc.sici(z)[0] # si oscillates as Re[z] -> +- inf, so limit range assert_mpmath_equal(si, mpmath.si, [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], rtol=1e-12) def test_spence(self): # mpmath uses a different convention for the dilogarithm def dilog(x): return mpmath.polylog(2, 1 - x) # Spence has a branch cut on the negative real axis assert_mpmath_equal(sc.spence, exception_to_nan(dilog), [Arg(0, np.inf)], rtol=1e-14) def test_spence_complex(self): def dilog(z): return mpmath.polylog(2, 1 - z) assert_mpmath_equal(sc.spence, exception_to_nan(dilog), [ComplexArg()], rtol=1e-14) def test_spherharm(self): def spherharm(l, m, theta, phi): if m > l: return np.nan return sc.sph_harm(m, l, phi, theta) assert_mpmath_equal(spherharm, mpmath.spherharm, [IntArg(0, 100), IntArg(0, 100), Arg(a=0, b=pi), Arg(a=0, b=2*pi)], atol=1e-8, n=6000, dps=150) def test_struveh(self): assert_mpmath_equal(sc.struve, exception_to_nan(mpmath.struveh), [Arg(-1e4, 1e4), Arg(0, 1e4)], rtol=5e-10) def test_struvel(self): def mp_struvel(v, z): if v < 0 and z < -v and abs(v) > 1000: # larger DPS needed for correct results old_dps = mpmath.mp.dps try: mpmath.mp.dps = 300 return mpmath.struvel(v, z) finally: mpmath.mp.dps = old_dps return mpmath.struvel(v, z) assert_mpmath_equal(sc.modstruve, exception_to_nan(mp_struvel), [Arg(-1e4, 1e4), Arg(0, 1e4)], rtol=5e-10, 
ignore_inf_sign=True) def test_wrightomega(self): assert_mpmath_equal(sc.wrightomega, lambda z: _mpmath_wrightomega(z, 25), [ComplexArg()], rtol=1e-14, nan_ok=False) def test_zeta(self): assert_mpmath_equal(sc.zeta, exception_to_nan(mpmath.zeta), [Arg(a=1, b=1e10, inclusive_a=False), Arg(a=0, inclusive_a=False)]) def test_zetac(self): assert_mpmath_equal(sc.zetac, lambda x: mpmath.zeta(x) - 1, [Arg(-100, 100)], nan_ok=False, dps=45, rtol=1e-13) def test_boxcox(self): def mp_boxcox(x, lmbda): x = mpmath.mp.mpf(x) lmbda = mpmath.mp.mpf(lmbda) if lmbda == 0: return mpmath.mp.log(x) else: return mpmath.mp.powm1(x, lmbda) / lmbda assert_mpmath_equal(sc.boxcox, exception_to_nan(mp_boxcox), [Arg(a=0, inclusive_a=False), Arg()], n=200, dps=60, rtol=1e-13) def test_boxcox1p(self): def mp_boxcox1p(x, lmbda): x = mpmath.mp.mpf(x) lmbda = mpmath.mp.mpf(lmbda) one = mpmath.mp.mpf(1) if lmbda == 0: return mpmath.mp.log(one + x) else: return mpmath.mp.powm1(one + x, lmbda) / lmbda assert_mpmath_equal(sc.boxcox1p, exception_to_nan(mp_boxcox1p), [Arg(a=-1, inclusive_a=False), Arg()], n=200, dps=60, rtol=1e-13) def test_spherical_jn(self): def mp_spherical_jn(n, z): arg = mpmath.mpmathify(z) out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n), z), exception_to_nan(mp_spherical_jn), [IntArg(0, 200), Arg(-1e8, 1e8)], dps=300) def test_spherical_jn_complex(self): def mp_spherical_jn(n, z): arg = mpmath.mpmathify(z) out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n.real), z), exception_to_nan(mp_spherical_jn), [IntArg(0, 200), ComplexArg()]) def test_spherical_yn(self): def mp_spherical_yn(n, z): arg = mpmath.mpmathify(z) out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n), z), exception_to_nan(mp_spherical_yn), [IntArg(0, 200), Arg(-1e10, 1e10)], dps=100) def test_spherical_yn_complex(self): def mp_spherical_yn(n, z): arg = mpmath.mpmathify(z) out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n.real), z), exception_to_nan(mp_spherical_yn), [IntArg(0, 200), ComplexArg()]) def test_spherical_in(self): def mp_spherical_in(n, z): arg = mpmath.mpmathify(z) out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n), z), exception_to_nan(mp_spherical_in), [IntArg(0, 200), Arg()], dps=200, atol=10**(-278)) def test_spherical_in_complex(self): def mp_spherical_in(n, z): arg = mpmath.mpmathify(z) out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n.real), z), exception_to_nan(mp_spherical_in), [IntArg(0, 200), ComplexArg()]) def test_spherical_kn(self): def mp_spherical_kn(n, z): out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) * mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z)))) if mpmath.mpmathify(z).imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n), z), exception_to_nan(mp_spherical_kn), 
[IntArg(0, 150), Arg()], dps=100) @pytest.mark.xfail(run=False, reason="Accuracy issues near z = -1 inherited from kv.") def test_spherical_kn_complex(self): def mp_spherical_kn(n, z): arg = mpmath.mpmathify(z) out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) / mpmath.sqrt(2*arg/mpmath.pi)) if arg.imag == 0: return out.real else: return out assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n.real), z), exception_to_nan(mp_spherical_kn), [IntArg(0, 200), ComplexArg()], dps=200)
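# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original suite). Every test above
# follows the same recipe: tabulate (inputs..., reference) rows from mpmath at
# elevated working precision, then hand the table to FuncData for comparison
# against the scipy.special implementation. The sketch below applies that
# recipe to erfinv; the test name and the rtol are assumptions made for
# illustration, not values taken from the original file.
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_erfinv_example_sketch():
    x = np.linspace(-0.9, 0.9, 19)
    dataset = []
    with mpmath.workdps(50):
        # extra digits so the reference column is trustworthy
        for x0 in x:
            dataset.append((x0, float(mpmath.erfinv(x0))))
    dataset = np.array(dataset)
    # column 0 is the argument, column 1 holds the reference values
    FuncData(sc.erfinv, dataset, 0, 1, rtol=1e-12).check()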
gfyoung/scipy
scipy/special/tests/test_mpmath.py
Python
bsd-3-clause
74,561
# -*- coding: utf-8 -*-
import re
import types
from datetime import datetime, timedelta

from django.core.exceptions import ValidationError
from django.core.validators import *
from django.utils.unittest import TestCase

NOW = datetime.now()

TEST_DATA = (
    # (validator, value, expected),
    (validate_integer, '42', None),
    (validate_integer, '-42', None),
    (validate_integer, -42, None),
    (validate_integer, -42.5, None),

    (validate_integer, None, ValidationError),
    (validate_integer, 'a', ValidationError),

    (validate_email, '[email protected]', None),
    (validate_email, '[email protected]', None),

    (validate_email, None, ValidationError),
    (validate_email, '', ValidationError),
    (validate_email, 'abc', ValidationError),
    (validate_email, 'a @x.cz', ValidationError),
    (validate_email, 'something@@somewhere.com', ValidationError),

    (validate_slug, 'slug-ok', None),
    (validate_slug, 'longer-slug-still-ok', None),
    (validate_slug, '--------', None),
    (validate_slug, 'nohyphensoranything', None),

    (validate_slug, '', ValidationError),
    (validate_slug, ' text ', ValidationError),
    (validate_slug, ' ', ValidationError),
    (validate_slug, '[email protected]', ValidationError),
    (validate_slug, '你好', ValidationError),
    (validate_slug, '\n', ValidationError),

    (validate_ipv4_address, '1.1.1.1', None),
    (validate_ipv4_address, '255.0.0.0', None),
    (validate_ipv4_address, '0.0.0.0', None),

    (validate_ipv4_address, '256.1.1.1', ValidationError),
    (validate_ipv4_address, '25.1.1.', ValidationError),
    (validate_ipv4_address, '25,1,1,1', ValidationError),
    (validate_ipv4_address, '25.1 .1.1', ValidationError),

    (validate_comma_separated_integer_list, '1', None),
    (validate_comma_separated_integer_list, '1,2,3', None),
    (validate_comma_separated_integer_list, '1,2,3,', None),

    (validate_comma_separated_integer_list, '', ValidationError),
    (validate_comma_separated_integer_list, 'a,b,c', ValidationError),
    (validate_comma_separated_integer_list, '1, 2, 3', ValidationError),

    (MaxValueValidator(10), 10, None),
    (MaxValueValidator(10), -10, None),
    (MaxValueValidator(10), 0, None),
    (MaxValueValidator(NOW), NOW, None),
    (MaxValueValidator(NOW), NOW - timedelta(days=1), None),

    (MaxValueValidator(0), 1, ValidationError),
    (MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),

    (MinValueValidator(-10), -10, None),
    (MinValueValidator(-10), 10, None),
    (MinValueValidator(-10), 0, None),
    (MinValueValidator(NOW), NOW, None),
    (MinValueValidator(NOW), NOW + timedelta(days=1), None),

    (MinValueValidator(0), -1, ValidationError),
    (MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),

    (MaxLengthValidator(10), '', None),
    (MaxLengthValidator(10), 10*'x', None),

    (MaxLengthValidator(10), 15*'x', ValidationError),

    (MinLengthValidator(10), 15*'x', None),
    (MinLengthValidator(10), 10*'x', None),

    (MinLengthValidator(10), '', ValidationError),

    (URLValidator(), 'http://www.djangoproject.com/', None),
    (URLValidator(), 'http://localhost/', None),
    (URLValidator(), 'http://example.com/', None),
    (URLValidator(), 'http://www.example.com/', None),
    (URLValidator(), 'http://www.example.com:8000/test', None),
    (URLValidator(), 'http://valid-with-hyphens.com/', None),
    (URLValidator(), 'http://subdomain.example.com/', None),
    (URLValidator(), 'http://200.8.9.10/', None),
    (URLValidator(), 'http://200.8.9.10:8000/test', None),
    (URLValidator(), 'http://valid-----hyphens.com/', None),
    (URLValidator(), 'http://example.com?something=value', None),
    (URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),

    (URLValidator(), 'foo', ValidationError),
    (URLValidator(), 'http://', ValidationError),
    (URLValidator(), 'http://example', ValidationError),
    (URLValidator(), 'http://example.', ValidationError),
    (URLValidator(), 'http://.com', ValidationError),
    (URLValidator(), 'http://invalid-.com', ValidationError),
    (URLValidator(), 'http://-invalid.com', ValidationError),
    (URLValidator(), 'http://inv-.alid-.com', ValidationError),
    (URLValidator(), 'http://inv-.-alid.com', ValidationError),

    (BaseValidator(True), True, None),
    (BaseValidator(True), False, ValidationError),

    (RegexValidator('.*'), '', None),
    (RegexValidator(re.compile('.*')), '', None),
    (RegexValidator('.*'), 'xxxxx', None),

    (RegexValidator('x'), 'y', ValidationError),
    (RegexValidator(re.compile('x')), 'y', ValidationError),
)


def create_simple_test_method(validator, expected, value, num):
    if expected is not None and issubclass(expected, Exception):
        test_mask = 'test_%s_raises_error_%d'

        def test_func(self):
            self.assertRaises(expected, validator, value)
    else:
        test_mask = 'test_%s_%d'

        def test_func(self):
            self.assertEqual(expected, validator(value))

    if isinstance(validator, types.FunctionType):
        val_name = validator.__name__
    else:
        val_name = validator.__class__.__name__

    test_name = test_mask % (val_name, num)
    return test_name, test_func


# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
    def test_single_message(self):
        v = ValidationError('Not Valid')
        self.assertEquals(str(v), "[u'Not Valid']")
        self.assertEquals(repr(v), "ValidationError([u'Not Valid'])")

    def test_message_list(self):
        v = ValidationError(['First Problem', 'Second Problem'])
        self.assertEquals(str(v), "[u'First Problem', u'Second Problem']")
        self.assertEquals(repr(v), "ValidationError([u'First Problem', u'Second Problem'])")

    def test_message_dict(self):
        v = ValidationError({'first': 'First Problem'})
        self.assertEquals(str(v), "{'first': 'First Problem'}")
        self.assertEquals(repr(v), "ValidationError({'first': 'First Problem'})")


test_counter = 0
for validator, value, expected in TEST_DATA:
    name, method = create_simple_test_method(validator, expected, value, test_counter)
    setattr(TestSimpleValidators, name, method)
    test_counter += 1
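# Illustrative sketch (not part of Django's test suite): the TEST_DATA /
# create_simple_test_method machinery above extends naturally to custom
# validators. `validate_even` below is a hypothetical validator invented for
# this example; its cases are attached with the same loop the file already
# uses, so the generated methods join TestSimpleValidators before collection.
def validate_even(value):
    if value % 2 != 0:
        raise ValidationError(u'%s is not an even number' % value)

for validator, value, expected in (
        (validate_even, 4, None),
        (validate_even, 5, ValidationError)):
    name, method = create_simple_test_method(validator, expected, value,
                                             test_counter)
    setattr(TestSimpleValidators, name, method)
    test_counter += 1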
faun/django_test
tests/modeltests/validators/tests.py
Python
bsd-3-clause
6,315
from unittest import TestCase

from chatterbot.storage import StorageAdapter
from chatterbot.conversation import Statement, Response


class StorageAdapterTestCase(TestCase):
    """
    This test case is for the StorageAdapter base class.
    Although this class is not intended for direct use,
    this test case ensures that exceptions requiring
    basic functionality are triggered when needed.
    """

    def setUp(self):
        super(StorageAdapterTestCase, self).setUp()
        self.adapter = StorageAdapter()

    def test_count(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.count()

    def test_find(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.find('')

    def test_filter(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.filter()

    def test_remove(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.remove('')

    def test_update(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.update('')

    def test_get_random(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.get_random()

    def test_get_response_statements(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.get_response_statements()

    def test_drop(self):
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            self.adapter.drop()
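# Illustrative sketch (not part of ChatterBot's tests): a concrete adapter
# stops raising AdapterMethodNotImplementedError only for the methods it
# overrides; everything else keeps the base-class behaviour exercised above.
# `InMemoryCountAdapter` is hypothetical, and forwarding **kwargs to
# StorageAdapter.__init__ is an assumption about its signature.
class InMemoryCountAdapter(StorageAdapter):

    def __init__(self, **kwargs):
        super(InMemoryCountAdapter, self).__init__(**kwargs)
        self.statements = []

    def count(self):
        return len(self.statements)


class ConcreteAdapterSketchTestCase(TestCase):

    def test_overridden_count_does_not_raise(self):
        adapter = InMemoryCountAdapter()
        self.assertEqual(adapter.count(), 0)

    def test_unimplemented_method_still_raises(self):
        adapter = InMemoryCountAdapter()
        with self.assertRaises(StorageAdapter.AdapterMethodNotImplementedError):
            adapter.find('')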
Gustavo6046/ChatterBot
tests/storage_adapter_tests/test_storage_adapter.py
Python
bsd-3-clause
1,702
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function from __future__ import division from __future__ import absolute_import import datetime import json import unittest from dashboard.api import alerts from dashboard.api import api_auth from dashboard.common import testing_common from dashboard.common import utils from dashboard.models import anomaly from dashboard.models import report_template from dashboard.models.subscription import Subscription class AlertsGeneralTest(testing_common.TestCase): def setUp(self): super(AlertsGeneralTest, self).setUp() self.SetUpApp([('/api/alerts', alerts.AlertsHandler)]) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) def _Post(self, **params): return json.loads(self.Post('/api/alerts', params).body) def _CreateAnomaly(self, internal_only=False, timestamp=None, bug_id=None, sheriff_name=None, test='master/bot/test_suite/measurement/test_case', start_revision=0, end_revision=100, display_start=0, display_end=100, median_before_anomaly=100, median_after_anomaly=200, is_improvement=False, recovered=False): entity = anomaly.Anomaly() entity.internal_only = internal_only if timestamp: entity.timestamp = timestamp entity.bug_id = bug_id if sheriff_name: entity.subscriptions = [ Subscription( name=sheriff_name, notification_email='[email protected]', ) ] entity.subscription_names = [sheriff_name] if test: entity.test = utils.TestKey(test) entity.start_revision = start_revision entity.end_revision = end_revision entity.display_start = display_start entity.display_end = display_end entity.median_before_anomaly = median_before_anomaly entity.median_after_anomaly = median_after_anomaly entity.is_improvement = is_improvement entity.recovered = recovered return entity.put().urlsafe() def testCountLimit(self): self._CreateAnomaly() self._CreateAnomaly() response = self._Post(limit=1, count_limit=10) self.assertEqual(1, len(response['anomalies'])) self.assertEqual(2, response['count']) def testAllExternal(self): self._CreateAnomaly() self._CreateAnomaly(internal_only=True) response = self._Post() self.assertEqual(1, len(response['anomalies'])) def testKey(self): response = self._Post(key=self._CreateAnomaly()) self.assertEqual(1, len(response['anomalies'])) def testKeyInternal_Internal(self): self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) response = self._Post(key=self._CreateAnomaly(internal_only=True)) self.assertEqual(1, len(response['anomalies'])) def testKeyInternal_External(self): response = self._Post(key=self._CreateAnomaly(internal_only=True)) self.assertEqual(0, len(response['anomalies'])) def testBot(self): self._CreateAnomaly() self._CreateAnomaly(test='adept/android/lodging/assessment/story') response = self._Post(bot='android') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('android', response['anomalies'][0]['bot']) def testReport(self): self._CreateAnomaly() self._CreateAnomaly(test='adept/android/lodging/assessment/story') report_template.ReportTemplate( name='foo', id=42, template={ 'rows': [{ 'testSuites': ['lodging'], 'measurement': 'assessment', 'bots': ['adept:android'], 'testCases': ['story'] }] }).put() response = self._Post(report=42) self.assertEqual(1, len(response['anomalies'])) self.assertEqual('android', response['anomalies'][0]['bot']) def testMaster(self): self._CreateAnomaly() self._CreateAnomaly(test='adept/android/lodging/assessment/story') 
response = self._Post(master='adept') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('adept', response['anomalies'][0]['master']) def testTestSuite(self): self._CreateAnomaly() self._CreateAnomaly(test='adept/android/lodging/assessment/story') response = self._Post(test_suite='lodging') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('lodging', response['anomalies'][0]['testsuite']) def testTest(self): self._CreateAnomaly() self._CreateAnomaly(test='adept/android/lodging/assessment/story') response = self._Post(test='adept/android/lodging/assessment/story') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('assessment/story', response['anomalies'][0]['test']) def testBugId(self): self._CreateAnomaly() self._CreateAnomaly(bug_id=42) response = self._Post(bug_id=42) self.assertEqual(1, len(response['anomalies'])) self.assertEqual(42, response['anomalies'][0]['bug_id']) response = self._Post(bug_id='') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(None, response['anomalies'][0]['bug_id']) def testIsImprovement(self): self._CreateAnomaly() self._CreateAnomaly(is_improvement=True) response = self._Post(is_improvement='true') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(True, response['anomalies'][0]['improvement']) response = self._Post(is_improvement='false') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(False, response['anomalies'][0]['improvement']) def testIsImprovement_Invalid(self): self._CreateAnomaly() self._CreateAnomaly(is_improvement=True) with self.assertRaises(Exception): self._Post(is_improvement='invalid') def testRecovered(self): self._CreateAnomaly() self._CreateAnomaly(recovered=True) response = self._Post(recovered='true') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(True, response['anomalies'][0]['recovered']) response = self._Post(recovered='false') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(False, response['anomalies'][0]['recovered']) def testRecovered_Invalid(self): self._CreateAnomaly() self._CreateAnomaly(recovered=True) with self.assertRaises(Exception): self._Post(recovered='invalid') def testLimit(self): self._CreateAnomaly() self._CreateAnomaly() response = self._Post(limit=1) self.assertEqual(1, len(response['anomalies'])) def testSheriff(self): self._CreateAnomaly(sheriff_name='Chromium Perf Sheriff', start_revision=42) self._CreateAnomaly(sheriff_name='WebRTC Perf Sheriff') response = self._Post(sheriff='Chromium Perf Sheriff') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(42, response['anomalies'][0]['start_revision']) def testMaxStartRevision(self): self._CreateAnomaly() self._CreateAnomaly(start_revision=2) response = self._Post(max_start_revision=1) self.assertEqual(1, len(response['anomalies'])) self.assertEqual(0, response['anomalies'][0]['start_revision']) def testMinStartRevision(self): self._CreateAnomaly() self._CreateAnomaly(start_revision=2) response = self._Post(min_start_revision=1) self.assertEqual(1, len(response['anomalies'])) self.assertEqual(2, response['anomalies'][0]['start_revision']) def testMaxEndRevision(self): self._CreateAnomaly() self._CreateAnomaly(end_revision=200) response = self._Post(max_end_revision=150) self.assertEqual(1, len(response['anomalies'])) self.assertEqual(100, response['anomalies'][0]['end_revision']) def testMinEndRevision(self): self._CreateAnomaly() self._CreateAnomaly(end_revision=200) response = self._Post(min_end_revision=150) 
self.assertEqual(1, len(response['anomalies'])) self.assertEqual(200, response['anomalies'][0]['end_revision']) def testMaxTimestamp(self): self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(59)) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(61)) response = self._Post(max_timestamp='1970-1-1T0:1:0.000001') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('1970-01-01T00:00:59', response['anomalies'][0]['timestamp']) def testMinTimestamp(self): self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(59)) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(61)) response = self._Post(min_timestamp='1970-1-1T0:1:0') self.assertEqual(1, len(response['anomalies'])) self.assertEqual('1970-01-01T00:01:01', response['anomalies'][0]['timestamp']) def testAllInequalityFilters(self): matching_start_revision = 15 matching_end_revision = 35 matching_timestamp = datetime.datetime.utcfromtimestamp(120) self._CreateAnomaly( start_revision=9, end_revision=matching_end_revision, timestamp=matching_timestamp) self._CreateAnomaly( start_revision=21, end_revision=matching_end_revision, timestamp=matching_timestamp) self._CreateAnomaly( start_revision=matching_start_revision, end_revision=29, timestamp=matching_timestamp) self._CreateAnomaly( start_revision=matching_start_revision, end_revision=41, timestamp=matching_timestamp) self._CreateAnomaly( start_revision=matching_start_revision, end_revision=matching_end_revision, timestamp=datetime.datetime.utcfromtimestamp(181)) self._CreateAnomaly( start_revision=matching_start_revision, end_revision=matching_end_revision, timestamp=datetime.datetime.utcfromtimestamp(59)) self._CreateAnomaly( start_revision=matching_start_revision, end_revision=matching_end_revision, timestamp=matching_timestamp) response = self._Post( min_start_revision=10, max_start_revision=20, min_end_revision=30, max_end_revision=40, min_timestamp='1970-1-1T0:1:0', max_timestamp='1970-1-1T0:3:0') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(matching_start_revision, response['anomalies'][0]['start_revision']) self.assertEqual(matching_end_revision, response['anomalies'][0]['end_revision']) self.assertEqual(matching_timestamp.isoformat(), response['anomalies'][0]['timestamp']) def testUntilFound(self): # inequality_property defaults to the first of 'start_revision', # 'end_revision', 'timestamp' that is filtered. # Filter by start_revision and timestamp so that timestamp will be filtered # post-hoc. Set limit so that the first few queries return alerts that match # the start_revision filter but not the post_filter, so that # QueryAnomaliesUntilFound must automatically chase cursors until it finds # some results. matching_timestamp = datetime.datetime.utcfromtimestamp(60) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(100)) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(90)) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(80)) self._CreateAnomaly(timestamp=datetime.datetime.utcfromtimestamp(70)) self._CreateAnomaly(timestamp=matching_timestamp) response = self._Post( limit=1, min_start_revision=0, max_timestamp='1970-1-1T0:1:0') self.assertEqual(1, len(response['anomalies'])) self.assertEqual(matching_timestamp.isoformat(), response['anomalies'][0]['timestamp']) if __name__ == '__main__': unittest.main()
catapult-project/catapult
dashboard/dashboard/api/alerts_test.py
Python
bsd-3-clause
12,059
# Author: Shao Zhang and Phil Saltzman
# Last Updated: 4/19/2005
#
# This tutorial is intended as an initial panda scripting lesson going over
# display initialization, loading models, placing objects, and the scene graph.
#
# Step 4: In this step, we will load the rest of the planets up to Mars.
# In addition to loading them, we will organize how the planets are grouped
# hierarchically in the scene. This will help us rotate them in the next step
# to give a rough simulation of the solar system. You can see them move by
# running step_5_complete_solar_system.py.

import direct.directbase.DirectStart
from panda3d.core import NodePath
from direct.gui.DirectGui import *
import sys

class World:
    def __init__(self):
        #This is the initialization we had before
        self.title = OnscreenText(  #Create the title
            text="Panda3D: Tutorial 1 - Solar System",
            style=1, fg=(1,1,1,1), pos=(0.8,-0.95), scale = .07)
        base.setBackgroundColor(0, 0, 0)  #Set the background to black
        base.disableMouse()               #disable mouse control of the camera
        camera.setPos ( 0, 0, 45 )        #Set the camera position (X, Y, Z)
        camera.setHpr ( 0, -90, 0 )       #Set the camera orientation
                                          #(heading, pitch, roll) in degrees

        #This section has our variables. This time we are adding a variable to
        #control the relative size of the orbits.
        self.sizescale = 0.6              #relative size of planets
        self.orbitscale = 10              #relative size of orbits

        self.loadPlanets()                #Load our models and make them render

    def loadPlanets(self):
        #Here is where we load all of the planets, and place them.
        #The first thing we do is create a dummy node for each planet. A dummy
        #node is simply a node path that does not have any geometry attached to it.
        #This is done by <NodePath>.attachNewNode('name_of_new_node')

        #We do this because positioning the planets around a circular orbit could
        #be done with a lot of messy sine and cosine operations. Instead, we define
        #our planets to be a given distance from a dummy node, and when we turn the
        #dummy, the planets will move along with it, kind of like turning the
        #center of a disc and having an object at its edge move. Most attributes,
        #like position, orientation, scale, texture, color, etc., are inherited
        #this way. Panda deals with the fact that the objects are not attached
        #directly to render (they are attached through other NodePaths to render),
        #and makes sure the attributes inherit.

        #This system of attaching NodePaths to each other is called the Scene Graph
        self.orbit_root_mercury = render.attachNewNode('orbit_root_mercury')
        self.orbit_root_venus = render.attachNewNode('orbit_root_venus')
        self.orbit_root_mars = render.attachNewNode('orbit_root_mars')
        self.orbit_root_earth = render.attachNewNode('orbit_root_earth')

        #orbit_root_moon is like all the other orbit_root dummy nodes except that
        #it will be parented to orbit_root_earth so that the moon will orbit the
        #earth instead of the sun. So, the moon will first inherit
        #orbit_root_moon's position and then orbit_root_earth's. There is no hard
        #limit on how many objects can inherit from each other.
        self.orbit_root_moon = (
            self.orbit_root_earth.attachNewNode('orbit_root_moon'))

        ###############################################################

        #These are the same steps used to load the sky model that we used in the
        #last step
        #Load the model for the sky
        self.sky = loader.loadModel("models/solar_sky_sphere")
        #Load the texture for the sky.
        self.sky_tex = loader.loadTexture("models/stars_1k_tex.jpg")
        #Set the sky texture to the sky model
        self.sky.setTexture(self.sky_tex, 1)
        #Parent the sky model to the render node so that the sky is rendered
        self.sky.reparentTo(render)
        #Scale the size of the sky.
        self.sky.setScale(40)

        #These are the same steps we used to load the sun in the last step.
        #Again, we use loader.loadModel since we're using planet_sphere more
        #than once.
        self.sun = loader.loadModel("models/planet_sphere")
        self.sun_tex = loader.loadTexture("models/sun_1k_tex.jpg")
        self.sun.setTexture(self.sun_tex, 1)
        self.sun.reparentTo(render)
        self.sun.setScale(2 * self.sizescale)

        #Now we load the planets, which we load using the same steps we used to
        #load the sun. The only difference is that the models are not parented
        #directly to render for the reasons described above.
        #The values used for scale are the ratio of the planet's radius to Earth's
        #radius, multiplied by our global scale variable. In the same way, the
        #values used for orbit are the ratio of the planet's orbit to Earth's
        #orbit, multiplied by our global orbit scale variable

        #Load mercury
        self.mercury = loader.loadModel("models/planet_sphere")
        self.mercury_tex = loader.loadTexture("models/mercury_1k_tex.jpg")
        self.mercury.setTexture(self.mercury_tex, 1)
        self.mercury.reparentTo(self.orbit_root_mercury)
        #Set the position of mercury. By default, all nodes are pre-assigned the
        #position (0, 0, 0) when they are first loaded. We didn't reposition the
        #sun and sky because they are centered in the solar system. Mercury,
        #however, needs to be offset so we use .setPos to offset the
        #position of mercury in the X direction with respect to its orbit radius.
        #We will do this for the rest of the planets.
        self.mercury.setPos( 0.38 * self.orbitscale, 0, 0)
        self.mercury.setScale(0.385 * self.sizescale)

        #Load Venus
        self.venus = loader.loadModel("models/planet_sphere")
        self.venus_tex = loader.loadTexture("models/venus_1k_tex.jpg")
        self.venus.setTexture(self.venus_tex, 1)
        self.venus.reparentTo(self.orbit_root_venus)
        self.venus.setPos( 0.72 * self.orbitscale, 0, 0)
        self.venus.setScale(0.923 * self.sizescale)

        #Load Mars
        self.mars = loader.loadModel("models/planet_sphere")
        self.mars_tex = loader.loadTexture("models/mars_1k_tex.jpg")
        self.mars.setTexture(self.mars_tex, 1)
        self.mars.reparentTo(self.orbit_root_mars)
        self.mars.setPos( 1.52 * self.orbitscale, 0, 0)
        self.mars.setScale(0.515 * self.sizescale)

        #Load Earth
        self.earth = loader.loadModel("models/planet_sphere")
        self.earth_tex = loader.loadTexture("models/earth_1k_tex.jpg")
        self.earth.setTexture(self.earth_tex, 1)
        self.earth.reparentTo(self.orbit_root_earth)
        self.earth.setScale(self.sizescale)
        self.earth.setPos( self.orbitscale, 0, 0)

        #The center of the moon's orbit is exactly the same distance away from
        #the sun as the Earth's distance from the sun
        self.orbit_root_moon.setPos( self.orbitscale, 0, 0)

        #Load the moon
        self.moon = loader.loadModel("models/planet_sphere")
        self.moon_tex = loader.loadTexture("models/moon_1k_tex.jpg")
        self.moon.setTexture(self.moon_tex, 1)
        self.moon.reparentTo(self.orbit_root_moon)
        self.moon.setScale(0.1 * self.sizescale)
        self.moon.setPos(0.1 * self.orbitscale, 0, 0)
    #end loadPlanets()
#end class world

w = World()
run()
toontownfunserver/Panda3D-1.9.0
samples/Solar-System/Tut-Step-4-Load-System.py
Python
bsd-3-clause
7,262
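The scene-graph comments in the tutorial above explain that rotating a dummy orbit node carries every child parented to it, which is what step 5 of the tutorial animates. As a rough sketch of that idea (not the actual step-5 code; the task function and rotation rate here are made up for illustration):

# A minimal sketch, assuming the World instance `w` from the file above and
# the globals (taskMgr, run) that direct.directbase.DirectStart installs.
from direct.task import Task

def spin_mercury(task):
    # Rotating the dummy node swings mercury, which is parented to it at a
    # fixed X offset, around the sun -- no sine/cosine needed.
    w.orbit_root_mercury.setH(task.time * 20)  # 20 degrees per second
    return Task.cont

taskMgr.add(spin_mercury, "spin_mercury")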
from django.contrib.auth.models import AnonymousUser from django.test import RequestFactory, TestCase from mock import MagicMock, patch from . import general_context class TestGeneralContext(TestCase): maxDiff = None @patch("evennia.web.utils.general_context.GAME_NAME", "test_name") @patch("evennia.web.utils.general_context.GAME_SLOGAN", "test_game_slogan") @patch( "evennia.web.utils.general_context.WEBSOCKET_CLIENT_ENABLED", "websocket_client_enabled_testvalue", ) @patch("evennia.web.utils.general_context.WEBCLIENT_ENABLED", "webclient_enabled_testvalue") @patch("evennia.web.utils.general_context.WEBSOCKET_PORT", "websocket_client_port_testvalue") @patch("evennia.web.utils.general_context.WEBSOCKET_URL", "websocket_client_url_testvalue") def test_general_context(self): request = RequestFactory().get("/") request.user = AnonymousUser() request.session = {"account": None, "puppet": None} response = general_context.general_context(request) self.assertEqual( response, { "account": None, "puppet": None, "game_name": "test_name", "game_slogan": "test_game_slogan", "evennia_userapps": ["Accounts"], "evennia_entityapps": ["Objects", "Scripts", "Comms", "Help"], "evennia_setupapps": ["Permissions", "Config"], "evennia_connectapps": ["Irc"], "evennia_websiteapps": ["Flatpages", "News", "Sites"], "webclient_enabled": "webclient_enabled_testvalue", "websocket_enabled": "websocket_client_enabled_testvalue", "websocket_port": "websocket_client_port_testvalue", "websocket_url": "websocket_client_url_testvalue", }, ) # spec being an empty list will initially raise AttributeError in set_game_name_and_slogan to test defaults @patch("evennia.web.utils.general_context.settings", spec=[]) @patch("evennia.web.utils.general_context.get_evennia_version") def test_set_game_name_and_slogan(self, mock_get_version, mock_settings): mock_get_version.return_value = "version 1" # test default/fallback values general_context.set_game_name_and_slogan() self.assertEqual(general_context.GAME_NAME, "Evennia") self.assertEqual(general_context.GAME_SLOGAN, "version 1") # test values when the settings are defined mock_settings.SERVERNAME = "test_name" mock_settings.GAME_SLOGAN = "test_game_slogan" general_context.set_game_name_and_slogan() self.assertEqual(general_context.GAME_NAME, "test_name") self.assertEqual(general_context.GAME_SLOGAN, "test_game_slogan") @patch("evennia.web.utils.general_context.settings") def test_set_webclient_settings(self, mock_settings): mock_settings.WEBCLIENT_ENABLED = "webclient" mock_settings.WEBSOCKET_CLIENT_URL = "websocket_url" mock_settings.WEBSOCKET_CLIENT_ENABLED = "websocket_client" mock_settings.WEBSOCKET_CLIENT_PORT = 5000 general_context.set_webclient_settings() self.assertEqual(general_context.WEBCLIENT_ENABLED, "webclient") self.assertEqual(general_context.WEBSOCKET_URL, "websocket_url") self.assertEqual(general_context.WEBSOCKET_CLIENT_ENABLED, "websocket_client") self.assertEqual(general_context.WEBSOCKET_PORT, 5000)
jamesbeebop/evennia
evennia/web/utils/tests.py
Python
bsd-3-clause
3,513
""" .. _tut_viz_evoked: ===================== Visualize Evoked data ===================== """ import os.path as op import numpy as np import matplotlib.pyplot as plt import mne ############################################################################### # In this tutorial we focus on plotting functions of :class:`mne.Evoked`. # First we read the evoked object from a file. Check out # :ref:`tut_epoching_and_averaging` to get to this stage from raw data. data_path = mne.datasets.sample.data_path() fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') evoked = mne.read_evokeds(fname, baseline=(None, 0), proj=True) print(evoked) ############################################################################### # Notice that ``evoked`` is a list of :class:`evoked <mne.Evoked>` instances. # You can read only one of the categories by passing the argument ``condition`` # to :func:`mne.read_evokeds`. To make things more simple for this tutorial, we # read each instance to a variable. evoked_l_aud = evoked[0] evoked_r_aud = evoked[1] evoked_l_vis = evoked[2] evoked_r_vis = evoked[3] ############################################################################### # Let's start with a simple one. We plot event related potentials / fields # (ERP/ERF). The bad channels are not plotted by default. Here we explicitly # set the ``exclude`` parameter to show the bad channels in red. All plotting # functions of MNE-python return a handle to the figure instance. When we have # the handle, we can customise the plots to our liking. fig = evoked_l_aud.plot(exclude=()) ############################################################################### # All plotting functions of MNE-python return a handle to the figure instance. # When we have the handle, we can customise the plots to our liking. For # example, we can get rid of the empty space with a simple function call. fig.tight_layout() ############################################################################### # Now let's make it a bit fancier and only use MEG channels. Many of the # MNE-functions include a ``picks`` parameter to include a selection of # channels. ``picks`` is simply a list of channel indices that you can easily # construct with :func:`mne.pick_types`. See also :func:`mne.pick_channels` and # :func:`mne.pick_channels_regexp`. # Using ``spatial_colors=True``, the individual channel lines are color coded # to show the sensor positions - specifically, the x, y, and z locations of # the sensors are transformed into R, G and B values. picks = mne.pick_types(evoked_l_aud.info, meg=True, eeg=False, eog=False) evoked_l_aud.plot(spatial_colors=True, gfp=True, picks=picks) ############################################################################### # Notice the legend on the left. The colors would suggest that there may be two # separate sources for the signals. This wasn't obvious from the first figure. # Try painting the slopes with left mouse button. It should open a new window # with topomaps (scalp plots) of the average over the painted area. There is # also a function for drawing topomaps separately. evoked_l_aud.plot_topomap() ############################################################################### # By default the topomaps are drawn from evenly spread out points of time over # the evoked data. We can also define the times ourselves. 
times = np.arange(0.05, 0.151, 0.05) evoked_r_aud.plot_topomap(times=times, ch_type='mag') ############################################################################### # Or we can automatically select the peaks. evoked_r_aud.plot_topomap(times='peaks', ch_type='mag') ############################################################################### # You can take a look at the documentation of :func:`mne.Evoked.plot_topomap` # or simply write ``evoked_r_aud.plot_topomap?`` in your python console to # see the different parameters you can pass to this function. Most of the # plotting functions also accept ``axes`` parameter. With that, you can # customise your plots even further. First we create a set of matplotlib # axes in a single figure and plot all of our evoked categories next to each # other. fig, ax = plt.subplots(1, 5) evoked_l_aud.plot_topomap(times=0.1, axes=ax[0], show=False) evoked_r_aud.plot_topomap(times=0.1, axes=ax[1], show=False) evoked_l_vis.plot_topomap(times=0.1, axes=ax[2], show=False) evoked_r_vis.plot_topomap(times=0.1, axes=ax[3], show=True) ############################################################################### # Notice that we created five axes, but had only four categories. The fifth # axes was used for drawing the colorbar. You must provide room for it when you # create this kind of custom plots or turn the colorbar off with # ``colorbar=False``. That's what the warnings are trying to tell you. Also, we # used ``show=False`` for the three first function calls. This prevents the # showing of the figure prematurely. The behavior depends on the mode you are # using for your python session. See http://matplotlib.org/users/shell.html for # more information. # # We can combine the two kinds of plots in one figure using the # :func:`mne.Evoked.plot_joint` method of Evoked objects. Called as-is # (``evoked.plot_joint()``), this function should give an informative display # of spatio-temporal dynamics. # You can directly style the time series part and the topomap part of the plot # using the ``topomap_args`` and ``ts_args`` parameters. You can pass key-value # pairs as a python dictionary. These are then passed as parameters to the # topomaps (:func:`mne.Evoked.plot_topomap`) and time series # (:func:`mne.Evoked.plot`) of the joint plot. # For an example of specific styling using these ``topomap_args`` and # ``ts_args`` arguments, here, topomaps at specific time points # (70 and 105 msec) are shown, sensors are not plotted (via an argument # forwarded to `plot_topomap`), and the Global Field Power is shown: ts_args = dict(gfp=True) topomap_args = dict(sensors=False) evoked_r_aud.plot_joint(title='right auditory', times=[.07, .105], ts_args=ts_args, topomap_args=topomap_args) ############################################################################### # Sometimes, you may want to compare two or more conditions at a selection of # sensors, or e.g. for the Global Field Power. For this, you can use the # function :func:`mne.viz.plot_compare_evokeds`. The easiest way is to create # a Python dictionary, where the keys are condition names and the values are # :class:`mne.Evoked` objects. If you provide lists of :class:`mne.Evoked` # objects, such as those for multiple subjects, the grand average is plotted, # along with a confidence interval band - this can be used to contrast # conditions for a whole experiment. # First, we load in the evoked objects into a dictionary, setting the keys to # '/'-separated tags (as we can do with event_ids for epochs). 
# Then, we plot
# with :func:`mne.viz.plot_compare_evokeds`.
# The plot is styled with dictionary arguments, again using "/"-separated tags.
# We plot a MEG channel with a strong auditory response.
conditions = ["Left Auditory", "Right Auditory", "Left visual", "Right visual"]
evoked_dict = dict()
for condition in conditions:
    evoked_dict[condition.replace(" ", "/")] = mne.read_evokeds(
        fname, baseline=(None, 0), proj=True, condition=condition)
print(evoked_dict)

colors = dict(Left="Crimson", Right="CornFlowerBlue")
linestyles = dict(Auditory='-', visual='--')
pick = evoked_dict["Left/Auditory"].ch_names.index('MEG 1811')

mne.viz.plot_compare_evokeds(evoked_dict, picks=pick, colors=colors,
                             linestyles=linestyles)

###############################################################################
# We can also plot the activations as images. The time runs along the x-axis
# and the channels along the y-axis. The amplitudes are color coded so that
# amplitudes from negative to positive translate to a shift from blue to
# red. White means zero amplitude. You can use the ``cmap`` parameter to define
# the color map yourself. The accepted values include all matplotlib colormaps.
evoked_r_aud.plot_image(picks=picks)

###############################################################################
# Finally we plot the sensor data as a topographical view. In the simple case
# we plot only left auditory responses, and then we plot them all in the same
# figure for comparison. Click on the individual plots to open them bigger.
title = 'MNE sample data (condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment)
colors = 'yellow', 'green', 'red', 'blue'
mne.viz.plot_evoked_topo(evoked, color=colors,
                         title=title % 'Left/Right Auditory/Visual')

###############################################################################
# Visualizing field lines in 3D
# -----------------------------
# We now compute the field maps to project MEG and EEG data to the MEG helmet
# and scalp surface.
#
# To do this we'll need coregistration information. See
# :ref:`tut_forward` for more details.
#
# Here we just illustrate usage.

subjects_dir = data_path + '/subjects'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'

maps = mne.make_field_map(evoked_l_aud, trans=trans_fname, subject='sample',
                          subjects_dir=subjects_dir, n_jobs=1)

# explore several points in time
field_map = evoked_l_aud.plot_field(maps, time=.1)

###############################################################################
# .. note::
#     If trans_fname is set to None then only MEG estimates can be visualized.
nicproulx/mne-python
tutorials/plot_visualize_evoked.py
Python
bsd-3-clause
9,661
# coding: utf-8 # Copyright (c) 2012, Machinalis S.R.L. # This file is part of quepy and is distributed under the Modified BSD License. # You should have received a copy of license in the LICENSE file. # # Authors: Rafael Carrascosa <[email protected]> # Gonzalo Garcia Berrotaran <[email protected]> """ Settings. """ # Generated query language LANGUAGE = "sparql" # NLTK config NLTK_DATA_PATH = ["/media/Todos/HMMY/3.thesis/dependencies4project"] # List of paths with NLTK data # Encoding config DEFAULT_ENCODING = "utf-8" # Sparql config SPARQL_PREAMBLE = u""" PREFIX owl: <http://www.w3.org/2002/07/owl#> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX foaf: <http://xmlns.com/foaf/0.1/> PREFIX skos: <http://www.w3.org/2004/02/skos/core#> PREFIX quepy: <http://www.machinalis.com/quepy#> """
apostolosSotiropoulos/interQuepy
quepy/settings.py
Python
bsd-3-clause
897
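The SPARQL_PREAMBLE above is the prefix block quepy puts in front of the queries it generates. As a hedged illustration of that concatenation (quepy assembles the query internally; the query body here is hand-written for the example):

from quepy.settings import SPARQL_PREAMBLE

# A hand-written query body standing in for quepy's generated output.
query_body = u"""SELECT DISTINCT ?name WHERE {
    ?person foaf:name ?name .
}
"""

# The preamble supplies the PREFIX declarations that the body's
# qualified names (foaf:name, rdfs:label, ...) rely on.
full_query = SPARQL_PREAMBLE + query_body
print(full_query)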
import pytest

from mitmproxy.contentviews import javascript
from . import full_eval


def test_view_javascript():
    v = full_eval(javascript.ViewJavaScript())
    assert v(b"[1, 2, 3]")
    assert v(b"[1, 2, 3")
    assert v(b"function(a){[1, 2, 3]}") == ("JavaScript", [
        [('text', 'function(a) {')],
        [('text', '  [1, 2, 3]')],
        [('text', '}')]
    ])
    assert v(b"\xfe")  # invalid utf-8


@pytest.mark.parametrize("filename", [
    "simple.js",
])
def test_format_js(filename, tdata):
    path = tdata.path("mitmproxy/contentviews/test_js_data/" + filename)
    with open(path) as f:
        input = f.read()
    with open("-formatted.".join(path.rsplit(".", 1))) as f:
        expected = f.read()
    js = javascript.beautify(input)
    assert js == expected
zlorb/mitmproxy
test/mitmproxy/contentviews/test_javascript.py
Python
mit
791
# Rabbits Multiplying # A (slightly) more realistic model of rabbit multiplication than the Fibonacci # model, would assume that rabbits eventually die. For this question, some # rabbits die from month 6 onwards. # # Thus, we can model the number of rabbits as: # # rabbits(1) = 1 # There is one pair of immature rabbits in Month 1 # rabbits(2) = 1 # There is one pair of mature rabbits in Month 2 # # For months 3-5: # Same as Fibonacci model, no rabbits dying yet # rabbits(n) = rabbits(n - 1) + rabbits(n - 2) # # # For months > 5: # All the rabbits that are over 5 months old die along with a few others # so that the number that die is equal to the number alive 5 months ago. # Before dying, the bunnies reproduce. # rabbits(n) = rabbits(n - 1) + rabbits(n - 2) - rabbits(n - 5) # # This produces the rabbit sequence: 1, 1, 2, 3, 5, 7, 11, 16, 24, 35, 52, ... # # Define a procedure rabbits that takes as input a number n, and returns a # number that is the value of the nth number in the rabbit sequence. # For example, rabbits(10) -> 35. (It is okay if your procedure takes too # long to run on inputs above 30.) def rabbits(n): if n == 1 or n == 2: return 1 elif n < 6: return rabbits(n-1) + rabbits(n-2) else: return rabbits(n-1) + rabbits(n-2) - rabbits(n-5) print rabbits(10) #>>> 35 s = "" for i in range(1,12): s = s + str(rabbits(i)) + " " print s #>>> 1 1 2 3 5 7 11 16 24 35 52
mi1980/projecthadoop3
udacity/cs101-intro-cs/code/lesson6/problem-set/multiplying_rabbits.py
Python
mit
1,473
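The recursive definition above recomputes the same subproblems exponentially often, which is why the comment warns that inputs above 30 get slow. A sketch of an alternative (not part of the exercise's expected answer): the same recurrence computed iteratively, keeping the whole sequence so the five-months-ago term is available.

def rabbits_fast(n):
    seq = [1, 1]  # months 1 and 2
    for month in range(3, n + 1):
        value = seq[-1] + seq[-2]
        if month > 5:
            # subtract the population from five months ago
            value -= seq[-5]
        seq.append(value)
    return seq[n - 1]

print(rabbits_fast(10))  # >>> 35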
class BowlingGame: def __init__(self): pass def roll(self, pins): pass def score(self): pass
jmluy/xpython
exercises/practice/bowling/bowling.py
Python
mit
131
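The file above is the empty exercism stub that the student is meant to fill in. As one hedged sketch of a fill-in, assuming standard ten-pin scoring (strike bonus: the next two rolls; spare bonus: the next roll) and omitting the input validation a full solution would need:

class BowlingGame:
    def __init__(self):
        self.rolls = []

    def roll(self, pins):
        self.rolls.append(pins)

    def score(self):
        total, i = 0, 0
        for _frame in range(10):
            if self.rolls[i] == 10:  # strike: 10 + next two rolls
                total += 10 + self.rolls[i + 1] + self.rolls[i + 2]
                i += 1
            elif self.rolls[i] + self.rolls[i + 1] == 10:  # spare: 10 + next roll
                total += 10 + self.rolls[i + 2]
                i += 2
            else:  # open frame
                total += self.rolls[i] + self.rolls[i + 1]
                i += 2
        return total

# e.g. a perfect game of twelve strikes scores 300
game = BowlingGame()
for _ in range(12):
    game.roll(10)
print(game.score())  # 300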
# test attributes of builtin range type try: range(0).start except AttributeError: import sys print("SKIP") sys.exit() # attrs print(range(1, 2, 3).start) print(range(1, 2, 3).stop) print(range(1, 2, 3).step) # bad attr (can't store) try: range(4).start = 0 except AttributeError: print('AttributeError')
puuu/micropython
tests/basics/builtin_range_attrs.py
Python
mit
332
# encoding: utf-8

ONE_MILE = float(0.62137)  # km units
ONE_FEET = float(3.2808)
ONE_KILOMETER = float(1000.00)  # m units

UNIT_KM = 'km'
UNIT_M = 'm'

MODE_DRIVING = 1
MODE_WALKING = 2
MODE_BICYCLING = 3

MODES = (
    (MODE_DRIVING, 'driving'),
    (MODE_WALKING, 'walking'),
    (MODE_BICYCLING, 'bicycling')
)

AVOID_TOLLS = 1
AVOID_HIGHWAYS = 2
AVOID_FERRIES = 3

AVOIDS = (
    (AVOID_TOLLS, 'tolls'),
    (AVOID_HIGHWAYS, 'highways'),
    (AVOID_FERRIES, 'ferries')
)
arcticio/ice-bloc-hdr
utils/external/geolocation/distance_matrix/const.py
Python
mit
479
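A small usage sketch for the constants above (the bare `const` import assumes you run next to the module; the helper names are illustrative, not part of the package):

from const import ONE_MILE, ONE_KILOMETER, UNIT_KM

def km_to_miles(km):
    # ONE_MILE stores miles per kilometre (0.62137).
    return km * ONE_MILE

def meters_to_km(meters):
    return meters / ONE_KILOMETER

print(km_to_miles(5))        # ~3.10685
print(meters_to_km(2500.0))  # 2.5
print(UNIT_KM)               # 'km'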
""" Port of Queue.Queue from the python standard library. """ __all__ = ['Full', 'Empty', 'Queue'] import collections import events from util import priority class Full(Exception): pass class Empty(Exception): pass class QGet(events.TimedOperation): "A operation for the queue get call." __slots__ = ('queue', 'block', 'caller', 'result', 'waiting') def __init__(self, queue, block, **kws): super(QGet, self).__init__(**kws) self.queue = queue self.block = block self.caller = None self.result = None self.waiting = False def finalize(self, sched): super(QGet, self).finalize(sched) return self.result def cleanup(self, sched, coro): if self.waiting: self.queue.waiting_gets.remove(self) return True def process(self, sched, coro): super(QGet, self).process(sched, coro) self.caller = coro if self.queue._empty(): if self.block: self.queue.waiting_gets.append(self) self.waiting = True else: raise Empty else: self.result = self.queue._get() if self.queue.waiting_puts: while not self.queue.full(): putop = self.queue.waiting_puts.popleft() putop.waiting = False self.queue._put(putop.item) if putop.prio & priority.CORO: if putop.prio & priority.OP: sched.active.appendleft((self, coro)) else: sched.active.append((self, coro)) return putop, putop.caller else: if putop.prio & priority.OP: sched.active.appendleft((putop, putop.caller)) else: sched.active.append((putop, putop.caller)) return self, coro def __repr__(self): return "<%s@%X caller:%s block:%s result:%s>" % ( self.__class__.__name__, id(self), self.caller, self.block, self.result ) class QPut(events.TimedOperation): "A operation for the queue put call." __slots__ = ('queue', 'item', 'block', 'caller', 'result', 'waiting') def __init__(self, queue, item, block, **kws): super(QPut, self).__init__(**kws) self.queue = queue self.item = item self.block = block self.caller = None self.waiting = False def cleanup(self, sched, coro): if self.waiting: self.queue.waiting_puts.remove(self) return True def process(self, sched, coro): super(QPut, self).process(sched, coro) self.caller = coro if self.queue._full(): if self.block: self.queue.unfinished_tasks += 1 self.queue.waiting_puts.append(self) self.waiting = True else: raise Full else: self.queue.unfinished_tasks += 1 if self.queue.waiting_gets: getop = self.queue.waiting_gets.popleft() getop.result = self.item getop.waiting = False if self.prio: if self.prio & priority.CORO: sched.active.appendleft((self, coro)) else: sched.active.append((self, coro)) return getop, getop.caller else: if getop.prio: sched.active.appendleft((getop, getop.caller)) else: sched.active.append((getop, getop.caller)) return self, coro else: self.queue._put(self.item) return self, coro def __repr__(self): return "<%s@%X caller:%s block:%s item:%s>" % ( self.__class__.__name__, id(self), self.caller, self.block, self.item ) class QDone(events.Operation): "A operation for the queue done_task call" __slots__ = ('queue',) def __init__(self, queue, **kws): super(QDone, self).__init__(**kws) self.queue = queue def process(self, sched, coro): super(QDone, self).process(sched, coro) if self.queue.joinees: if self.prio & priority.OP: sched.active.extendleft(self.queue.joinees) else: sched.active.extend(self.queue.joinees) return self, coro class QJoin(events.Operation): "A operation for the queue join call." 
__slots__ = ('queue',) def __init__(self, queue, **kws): super(QJoin, self).__init__(**kws) self.queue = queue def process(self, sched, coro): super(QJoin, self).process(sched, coro) if self.queue.unfinished_tasks == 0: return self, coro else: self.queue.joinees.append( (self, coro) ) class Queue: """This class attempts to mimic the exact functionality of the python standard library Queue.Queue class, but with a coroutine context: * the queue calls return coroutine operations So, to use this you write someting like: .. sourcecode:: python @coroutine def foo(): q = cogen.core.queue.Queue(<size>) yield q.put(123) val = yield q.get() """ def __init__(self, maxsize=0): self._init(maxsize) self.waiting_puts = collections.deque() self.waiting_gets = collections.deque() self.unfinished_tasks = 0 self.joinees = [] def __repr__(self): return "<%s %s wput:%s wget:%s>" % ( self.__class__, self._repr(), self.waiting_puts, self.waiting_gets ) def task_done(self, **kws): """Indicate that a formerly enqueued task is complete. Used by Queue consumer threads. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises a ValueError if called more times than there were items placed in the queue. """ unfinished = self.unfinished_tasks - 1 op = None if unfinished <= 0: if unfinished < 0: raise ValueError('task_done() called too many times') op = QDone(self, **kws) self.unfinished_tasks = unfinished return op def join(self): """Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ if self.unfinished_tasks: return QJoin(self) def qsize(self): """Return the approximate size of the queue (not reliable!).""" return self._qsize() def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" return self._empty() def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" return self._full() def put(self, item, block=True, **kws): """Put an item into the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is a positive number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' is ignored in that case). """ return QPut(self, item, block, **kws) def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the Full exception. """ return self.put(item, False) def get(self, block=True, **kws): """Remove and return an item from the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is a positive number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. 
Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored in that case). """ return QGet(self, block, **kws) def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the Empty exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = collections.deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft() def _repr(self): return repr(self.queue)
pombredanne/cogen
cogen/core/queue.py
Python
mit
10,675
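An end-to-end sketch of the producer/consumer pattern the Queue docstring describes, assuming cogen's documented Scheduler/coroutine API (cogen.core.schedulers.Scheduler, cogen.core.coroutines.coroutine); illustrative, not part of the module:

from cogen.core.coroutines import coroutine
from cogen.core.schedulers import Scheduler
from cogen.core.queue import Queue

q = Queue(maxsize=2)

@coroutine
def producer():
    for item in range(5):
        yield q.put(item)   # cooperatively blocks while the queue is full

@coroutine
def consumer():
    for _ in range(5):
        item = yield q.get()
        print('got %s' % item)

sched = Scheduler()
sched.add(producer)
sched.add(consumer)
sched.run()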
# encoding: utf-8 # The MIT License # # Copyright (c) 2012 the bpython authors. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ Helper module for Python 3 compatibility. Defines the following attributes: - PythonLexer: Pygment's Python lexer matching the hosting runtime's Python version. - py3: True if the hosting Python runtime is of Python version 3 or later """ import sys py3 = (sys.version_info[0] == 3) if py3: from pygments.lexers import Python3Lexer as PythonLexer else: from pygments.lexers import PythonLexer
crakensio/django_training
lib/python2.7/site-packages/bpython/_py3compat.py
Python
cc0-1.0
1,574
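A small sketch of how the compatibility attributes this module defines are meant to be consumed, so the call site needs no version checks of its own (the sample source strings are illustrative):

from bpython._py3compat import PythonLexer, py3

# PythonLexer already matches the hosting runtime's Python version.
lexer = PythonLexer()
source = "print('hello')" if py3 else "print 'hello'"
tokens = list(lexer.get_tokens(source))
print(tokens[0])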
## opcodes.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 David Martínez Moreno <[email protected]>
# This software is not affiliated in any way with Facebook, my current employer.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.

instructions = {
    "ADD <dst>, <src>":"Adds <src> to <dst>.\n<dst> may be a register or memory.\n<src> may be a register, memory or immediate value.",
    "CALL <loc>":"Call a function and return to the next instruction when finished.\n<loc> may be a relative offset from the current location, a register or memory addr.",
    "CMP <dst>, <src>":"Compare <src> with <dst>.\nSimilar to SUB instruction but does not modify the <dst> operand\nwith the result of the subtraction.",
    "DEC <dst>":"Subtract 1 from <dst>.\n<dst> may be a register or memory.",
    "DIV <divisor>":"Divide the EDX:EAX registers (64‐bit combo) by <divisor>.\n<divisor> may be a register or memory.",
    "INC <dst>":"Add 1 to <dst>.\n<dst> may be a register or memory.",
    "JE <loc>":"Jump if Equal (ZF=1) to <loc>.",
    "JGE <loc>":"Jump if Greater or Equal (SF=OF) to <loc>.",
    "JG <loc>":"Jump if Greater (ZF=0 and SF=OF) to <loc>.",
    "JLE <loc>":"Jump if Less or Equal (ZF=1 or SF<>OF) to <loc>.",
    "JMP <loc>":"Jump to <loc>. Unconditional.",
    "JNE <loc>":"Jump if Not Equal (ZF=0) to <loc>.",
    "JNZ <loc>":"Jump if Not Zero (ZF=0) to <loc>.",
    "JZ <loc>":"Jump if Zero (ZF=1) to <loc>.",
    "LEA <dst>, <src>":"Load Effective Address.\nGets a pointer to the memory expression <src> and stores it in <dst>.",
    "MOV <dst>, <src>":"Move data from <src> to <dst>.\n<src> may be an immediate value, register, or a memory address.\n<dst> may be either a memory address or a register.\nBoth <src> and <dst> may not be memory addresses.",
    "MUL <src>":"Multiply the EDX:EAX registers (64‐bit combo) by <src>.\n<src> may be a register or memory.",
    "POP <dst>":"Take a 32‐bit value from the stack and store it in <dst>.\nESP is incremented by 4.\n<dst> may be a register, including segment registers, or memory.",
    "PUSH <value>":"Adds a 32‐bit value to the top of the stack.\nDecrements ESP by 4.\n<value> may be a register, segment register, memory or immediate value.",
    "ROL <dst>, <count>":"Bitwise Rotate Left the value in <dst> by <count> bits.\n<dst> may be a register or memory address.\n<count> may be immediate or CL register.",
    "ROR <dst>, <count>":"Bitwise Rotate Right the value in <dst> by <count> bits.\n<dst> may be a register or memory address.\n<count> may be immediate or CL register.",
    "SHL <dst>, <count>":"Bitwise Shift Left the value in <dst> by <count> bits.\nZero bits added to the least significant bits.\n<dst> may be reg. or mem. <count> is imm. or CL.",
    "SHR <dst>, <count>":"Bitwise Shift Right the value in <dst> by <count> bits.\nZero bits added to the most significant bits.\n<dst> may be reg. or mem. <count> is imm. or CL.",
    "SUB <dst>, <src>":"Subtract <src> from <dst>.\n<src> may be immediate, memory or a register.\n<dst> may be memory or a register.\n(src = dst)‐>ZF=1, (src > dst)‐>CF=1, (src < dst)‐>CF=0 and ZF=0",
    "TEST <dst>, <src>":"Performs a logical AND operation but does not modify the value in the <dst> operand.\n(dst AND src = 0)‐>ZF=1, otherwise ZF=0.",
    "XCHG <dst>, <src>":"Exchange the contents of <src> and <dst>.\nOperands may be register or memory.\nBoth operands may not be memory.",
    "XOR <dst>, <src>":"Bitwise XOR the value in <src> with the value in <dst>, storing the result in <dst>.\n<dst> may be reg or mem and <src> may be reg, mem or imm."
}
merckhung/bokken
ui/opcodes.py
Python
gpl-2.0
4,343
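The `instructions` mapping above is plain data keyed by mnemonic templates. A sketch of the kind of lookup a UI tooltip might perform (the `describe` helper is illustrative, not part of bokken):

from ui.opcodes import instructions

def describe(mnemonic):
    mnemonic = mnemonic.upper()
    for template, help_text in instructions.items():
        # Keys look like "XOR <dst>, <src>"; the first token is the mnemonic.
        if template.split()[0] == mnemonic:
            return template + '\n' + help_text
    return 'No help available for %s' % mnemonic

print(describe('xor'))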
# Copyright (C) 2003-2004, 2006, 2009 The Written Word, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # $Id: ftp.py 694 2010-02-19 14:55:45Z china $ import urllib2 from urlparse import urlparse import depot import pycurl from depot.http import http from misclib import urljoin class ftp (http): """Remote package `Depot` over FTP.""" def __init__ (self, var, local_depot, dep, sub_depot): """Return an FTP `Depot` object.""" # This is better, but doesn't work for classic classes: # super (ftp, self).__init__ (var, local_depot, dep, sub_depot) # So, we're forced to hardcode the superclass for now: http.__init__ (self, var, local_depot, dep, sub_depot) urltuple = urlparse (self.url, 'ftp') [self.host, self.port] = (urltuple.hostname, urltuple.port or 21) def _perform (self, options): """An option-setting wrapper for `pycurl`. :Parameters: `options`: dictionary A dictionary of PYCURL option names and settings. :Exceptions: - `DepotFileNotFoundError`: If a remote file is not found. - `DepotAccessDeniedError`: If remote authentication failed. - `DepotAccessError`: Any other problem fetching. Instantiate pycurl and use it to perform an action based on the passed `options`. """ c = pycurl.Curl () #c.setopt (pycurl.VERBOSE, 1) for (k, v) in options.items (): c.setopt (k, v) self._proxy_auth (c) self._protocol_auth (c) try: c.perform () except pycurl.error, msg: status = msg[0] if status in (pycurl.E_FTP_COULDNT_RETR_FILE, pycurl.E_REMOTE_FILE_NOT_FOUND): raise depot.DepotFileNotFoundError (self.url) elif status in (pycurl.E_LOGIN_DENIED, pycurl.E_FTP_ACCESS_DENIED): raise depot.DepotAccessDeniedError else: raise depot.DepotAccessError (msg[1]) def _protocol_auth (self, c): """Handle FTP specific authentication.""" if self.auth.login: c.setopt (pycurl.USERPWD, self.auth.login + ':' + \ (self.auth.passwd or '')) # XXX ftps not tested (or previously supported) c.setopt (pycurl.FTP_SSL, pycurl.FTPSSL_TRY) c.setopt (pycurl.FTPSSLAUTH, pycurl.FTPAUTH_TLS) def _proxy_auth (self, c): """Handle proxy configuration.""" if self.proxy.host: c.setopt (pycurl.PROXY, self.proxy.host) c.setopt (pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP) c.setopt (pycurl.PROXYAUTH, pycurl.HTTPAUTH_ANY) if self.proxy.login: c.setopt (pycurl.PROXYUSERPWD, self.proxy.login + ':' + \ (self.proxy.passwd or '')) def exists (self, filename): """Test whether `filename` is present at the remote host. :Parameters: `filename`: string Path to remote file relative to remote repository. :Return: - True unless remote file cannot be found. """ # save full url in case of error _url = urljoin (self.pkg_url, filename) try: self._perform ({ pycurl.URL: _url, pycurl.WRITEDATA: open ('/dev/null', 'wb'), # discard headers pycurl.OPT_FILETIME: 1, pycurl.NOBODY: 1, pycurl.NOPROGRESS: 1, pycurl.FAILONERROR: 1, }) except depot.DepotFileNotFoundError: return False return True def set_proxy (self): """Set proxy information. 
Initiate ftp connection to proxy server with login@host as username. """ if not self.proxy.host: return if self.auth.login: self.auth.login = self.auth.login + '@' + self.host + ':' + \ str (self.port) else: self.auth.login = 'anonymous@' + self.host + ':' + str (self.port) proxytuple = urllib2.splitport (self.proxy.host) [self.host, self.port] = (proxytuple[0], int (proxytuple[1] or 21))
tjyang/sbutils
lib/depot/ftp.py
Python
gpl-2.0
4,505
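A standalone sketch of the pycurl pattern that `exists` above relies on: a bodyless request with FAILONERROR set, so a missing remote file surfaces as a pycurl.error instead of downloaded data. Illustrative only; the real class routes this through `_perform` so the proxy and authentication options are applied too:

import pycurl

def remote_file_exists(url):
    # url is a full FTP URL, e.g. 'ftp://host/path/file'
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.NOBODY, 1)       # fetch metadata only, no transfer
    c.setopt(pycurl.NOPROGRESS, 1)
    c.setopt(pycurl.FAILONERROR, 1)  # turn server errors into exceptions
    try:
        c.perform()
    except pycurl.error:
        return False
    finally:
        c.close()
    return True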
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.energy', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## wifi-mode.h (module 'wifi'): ns3::WifiCodeRate [enumeration] module.add_enum('WifiCodeRate', ['WIFI_CODE_RATE_UNDEFINED', 'WIFI_CODE_RATE_3_4', 'WIFI_CODE_RATE_2_3', 'WIFI_CODE_RATE_1_2', 'WIFI_CODE_RATE_5_6'], import_from_module='ns.wifi') ## wifi-preamble.h (module 'wifi'): ns3::WifiPreamble [enumeration] module.add_enum('WifiPreamble', ['WIFI_PREAMBLE_LONG', 'WIFI_PREAMBLE_SHORT', 'WIFI_PREAMBLE_HT_MF', 'WIFI_PREAMBLE_HT_GF'], import_from_module='ns.wifi') ## wifi-phy-standard.h (module 'wifi'): ns3::WifiPhyStandard [enumeration] module.add_enum('WifiPhyStandard', ['WIFI_PHY_STANDARD_80211a', 'WIFI_PHY_STANDARD_80211b', 'WIFI_PHY_STANDARD_80211g', 'WIFI_PHY_STANDARD_80211_10MHZ', 'WIFI_PHY_STANDARD_80211_5MHZ', 'WIFI_PHY_STANDARD_holland', 'WIFI_PHY_STANDARD_80211n_2_4GHZ', 'WIFI_PHY_STANDARD_80211n_5GHZ'], import_from_module='ns.wifi') ## wifi-mode.h (module 'wifi'): ns3::WifiModulationClass [enumeration] module.add_enum('WifiModulationClass', ['WIFI_MOD_CLASS_UNKNOWN', 'WIFI_MOD_CLASS_IR', 'WIFI_MOD_CLASS_FHSS', 'WIFI_MOD_CLASS_DSSS', 'WIFI_MOD_CLASS_ERP_PBCC', 'WIFI_MOD_CLASS_DSSS_OFDM', 'WIFI_MOD_CLASS_ERP_OFDM', 'WIFI_MOD_CLASS_OFDM', 'WIFI_MOD_CLASS_HT'], import_from_module='ns.wifi') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase 
[class] module.add_class('CallbackBase', import_from_module='ns.core') ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer [class] module.add_class('DeviceEnergyModelContainer') ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper [class] module.add_class('DeviceEnergyModelHelper', allow_subclassing=True) ## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper [class] module.add_class('EnergySourceHelper', allow_subclassing=True) ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', 
outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## rv-battery-model-helper.h (module 'energy'): ns3::RvBatteryModelHelper [class] module.add_class('RvBatteryModelHelper', parent=root_module['ns3::EnergySourceHelper']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## traced-value.h (module 'core'): ns3::TracedValue<double> [class] module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double']) ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## wifi-mode.h (module 'wifi'): ns3::WifiMode [class] module.add_class('WifiMode', import_from_module='ns.wifi') ## wifi-mode.h (module 'wifi'): ns3::WifiModeFactory [class] module.add_class('WifiModeFactory', import_from_module='ns.wifi') ## wifi-phy.h (module 'wifi'): ns3::WifiPhyListener [class] module.add_class('WifiPhyListener', allow_subclassing=True, import_from_module='ns.wifi') ## wifi-radio-energy-model-helper.h (module 'energy'): ns3::WifiRadioEnergyModelHelper [class] module.add_class('WifiRadioEnergyModelHelper', parent=root_module['ns3::DeviceEnergyModelHelper']) ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModelPhyListener [class] module.add_class('WifiRadioEnergyModelPhyListener', parent=root_module['ns3::WifiPhyListener']) ## wifi-tx-vector.h (module 'wifi'): ns3::WifiTxVector [class] module.add_class('WifiTxVector', import_from_module='ns.wifi') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## basic-energy-source-helper.h (module 'energy'): ns3::BasicEnergySourceHelper [class] module.add_class('BasicEnergySourceHelper', 
parent=root_module['ns3::EnergySourceHelper']) ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', 
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## traced-value.h (module 'core'): ns3::TracedValue<ns3::Time> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['ns3::Time'])
    ## traced-value.h (module 'core'): ns3::TracedValue<ns3::Time> [class]
    root_module['ns3::TracedValue< ns3::Time >'].implicitly_converts_to(root_module['ns3::Time'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## wifi-phy.h (module 'wifi'): ns3::WifiPhy [class]
    module.add_class('WifiPhy', import_from_module='ns.wifi', parent=root_module['ns3::Object'])
    ## wifi-phy.h (module 'wifi'): ns3::WifiPhy::State [enumeration]
    module.add_enum('State', ['IDLE', 'CCA_BUSY', 'TX', 'RX', 'SWITCHING'], outer_class=root_module['ns3::WifiPhy'], import_from_module='ns.wifi')
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## boolean.h (module 'core'): ns3::BooleanChecker [class]
    module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## boolean.h (module 'core'): ns3::BooleanValue [class]
    module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel [class]
    module.add_class('DeviceEnergyModel', parent=root_module['ns3::Object'])
    ## double.h (module 'core'): ns3::DoubleValue [class]
    module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## energy-source.h (module 'energy'): ns3::EnergySource [class]
    module.add_class('EnergySource', parent=root_module['ns3::Object'])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer [class]
    module.add_class('EnergySourceContainer', parent=root_module['ns3::Object'])
    ## enum.h (module 'core'): ns3::EnumChecker [class]
    module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## enum.h (module 'core'): ns3::EnumValue [class]
    module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## integer.h (module 'core'): ns3::IntegerValue [class]
    module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## li-ion-energy-source.h (module 'energy'): ns3::LiIonEnergySource [class]
    module.add_class('LiIonEnergySource', parent=root_module['ns3::EnergySource'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## rv-battery-model.h (module 'energy'): ns3::RvBatteryModel [class]
    module.add_class('RvBatteryModel', parent=root_module['ns3::EnergySource'])
    ## simple-device-energy-model.h (module 'energy'): ns3::SimpleDeviceEnergyModel [class]
    module.add_class('SimpleDeviceEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## uinteger.h (module 'core'): ns3::UintegerValue [class]
    module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
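    # NOTE (editorial, not generated by PyBindGen): the *Value/*Checker classes
    # registered above back ns-3's attribute system. A hedged usage sketch via
    # the built bindings, assuming the compiled ns.core/ns.energy modules are
    # importable; the attribute name string is believed correct for this ns-3
    # era but should be checked against your version:
    #
    #   import ns.core, ns.energy
    #   src = ns.energy.BasicEnergySource()
    #   src.SetAttribute('BasicEnergySourceInitialEnergyJ',
    #                    ns.core.DoubleValue(10.0))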
    ## wifi-mode.h (module 'wifi'): ns3::WifiModeChecker [class]
    module.add_class('WifiModeChecker', import_from_module='ns.wifi', parent=root_module['ns3::AttributeChecker'])
    ## wifi-mode.h (module 'wifi'): ns3::WifiModeValue [class]
    module.add_class('WifiModeValue', import_from_module='ns.wifi', parent=root_module['ns3::AttributeValue'])
    ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModel [class]
    module.add_class('WifiRadioEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## basic-energy-source.h (module 'energy'): ns3::BasicEnergySource [class]
    module.add_class('BasicEnergySource', parent=root_module['ns3::EnergySource'])
    module.add_container('ns3::WifiModeList', 'ns3::WifiMode', container_type=u'vector')
    typehandlers.add_type_alias(u'std::vector< unsigned char, std::allocator< unsigned char > >', u'ns3::WifiMcsList')
    typehandlers.add_type_alias(u'std::vector< unsigned char, std::allocator< unsigned char > >*', u'ns3::WifiMcsList*')
    typehandlers.add_type_alias(u'std::vector< unsigned char, std::allocator< unsigned char > >&', u'ns3::WifiMcsList&')
    typehandlers.add_type_alias(u'std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > >', u'ns3::WifiModeList')
    typehandlers.add_type_alias(u'std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > >*', u'ns3::WifiModeList*')
    typehandlers.add_type_alias(u'std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > >&', u'ns3::WifiModeList&')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< ns3::WifiMode const *, std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > > >', u'ns3::WifiModeListIterator')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< ns3::WifiMode const *, std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > > >*', u'ns3::WifiModeListIterator*')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< ns3::WifiMode const *, std::vector< ns3::WifiMode, std::allocator< ns3::WifiMode > > >&', u'ns3::WifiModeListIterator&')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< unsigned char const *, std::vector< unsigned char, std::allocator< unsigned char > > >', u'ns3::WifiMcsListIterator')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< unsigned char const *, std::vector< unsigned char, std::allocator< unsigned char > > >*', u'ns3::WifiMcsListIterator*')
    typehandlers.add_type_alias(u'__gnu_cxx::__normal_iterator< unsigned char const *, std::vector< unsigned char, std::allocator< unsigned char > > >&', u'ns3::WifiMcsListIterator&')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace internal
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)
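
# NOTE (editorial, not generated by PyBindGen): the add_container/add_type_alias
# calls above only teach the type handlers that the C++ typedef ns3::WifiModeList
# is spelled std::vector<ns3::WifiMode>, so methods returning a WifiModeList come
# back to Python as a sequence of WifiMode objects; the iterator aliases exist for
# the same reason and create no new wrapper classes. Hedged sketch (WifiPhy method
# names believed correct for this ns-3 era):
#
#   # import ns.wifi
#   # phy = ...  # a ns3::WifiPhy instance obtained elsewhere
#   # modes = [phy.GetMode(i) for i in range(phy.GetNModes())]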

def register_types_ns3_FatalImpl(module):
    root_module = module.get_root()


def register_types_ns3_Hash(module):
    root_module = module.get_root()

    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)


def register_types_ns3_Hash_Function(module):
    root_module = module.get_root()

    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])


def register_types_ns3_internal(module):
    root_module = module.get_root()
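
# NOTE (editorial, not generated by PyBindGen): Fnv1a and Murmur3 above are the
# pluggable back ends behind ns3::Hasher, whose methods are registered further
# down. A hedged usage sketch through the built bindings (ns-3 of this era
# defaults to Murmur3; confirm against your version):
#
#   import ns.core
#   h = ns.core.Hasher()
#   h32 = h.GetHash32("energy")   # 32-bit hash of the string
#   h.clear()                     # reset internal state before re-hashing
#   h64 = h.GetHash64("energy")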

def register_methods(root_module):
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
    register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
    register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3RvBatteryModelHelper_methods(root_module, root_module['ns3::RvBatteryModelHelper'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3WifiMode_methods(root_module, root_module['ns3::WifiMode'])
    register_Ns3WifiModeFactory_methods(root_module, root_module['ns3::WifiModeFactory'])
    register_Ns3WifiPhyListener_methods(root_module, root_module['ns3::WifiPhyListener'])
    register_Ns3WifiRadioEnergyModelHelper_methods(root_module, root_module['ns3::WifiRadioEnergyModelHelper'])
    register_Ns3WifiRadioEnergyModelPhyListener_methods(root_module, root_module['ns3::WifiRadioEnergyModelPhyListener'])
    register_Ns3WifiTxVector_methods(root_module, root_module['ns3::WifiTxVector'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3BasicEnergySourceHelper_methods(root_module, root_module['ns3::BasicEnergySourceHelper'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3TracedValue__Ns3Time_methods(root_module, root_module['ns3::TracedValue< ns3::Time >'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3WifiPhy_methods(root_module, root_module['ns3::WifiPhy'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
    register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LiIonEnergySource_methods(root_module, root_module['ns3::LiIonEnergySource'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3RvBatteryModel_methods(root_module, root_module['ns3::RvBatteryModel'])
    register_Ns3SimpleDeviceEnergyModel_methods(root_module, root_module['ns3::SimpleDeviceEnergyModel'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3WifiModeChecker_methods(root_module, root_module['ns3::WifiModeChecker'])
    register_Ns3WifiModeValue_methods(root_module, root_module['ns3::WifiModeValue'])
    register_Ns3WifiRadioEnergyModel_methods(root_module, root_module['ns3::WifiRadioEnergyModel'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BasicEnergySource_methods(root_module, root_module['ns3::BasicEnergySource'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return


def register_Ns3Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return
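
# NOTE (editorial, not generated by PyBindGen): ns3::Address is a type-erased
# byte container; concrete address types convert through CopyTo/CopyFrom and the
# per-type Register()/IsMatchingType() machinery. A hedged sketch via the built
# bindings:
#
#   import ns.network
#   a = ns.network.Address()
#   assert a.IsInvalid()          # default-constructed Address holds no bytes
#   assert a.GetLength() == 0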

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return


def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return


def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
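
# NOTE (editorial, not generated by PyBindGen): Buffer I/O goes through the
# Iterator registered next; Begin() returns a fresh iterator each time, so a
# write pass and a read pass each start from their own Begin(). Hedged sketch
# via the built bindings:
#
#   import ns.network
#   buf = ns.network.Buffer()
#   buf.AddAtStart(4)                     # reserve 4 bytes at the front
#   buf.Begin().WriteHtonU32(0xdeadbeef)  # network byte order write
#   value = buf.Begin().ReadNtohU32()     # reads back 0xdeadbeef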

def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return


def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return


def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
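
# NOTE (editorial, not generated by PyBindGen): ByteTagIterator deliberately
# exposes a Java-style HasNext()/Next() protocol rather than Python's iterator
# protocol. Hedged sketch over a packet 'pkt' (a ns3::Packet obtained elsewhere):
#
#   it = pkt.GetByteTagIterator()
#   while it.HasNext():
#       item = it.Next()
#       print(item.GetTypeId().GetName(), item.GetStart(), item.GetEnd())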

def register_Ns3ByteTagList_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return


def register_Ns3ByteTagListIterator_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return


def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return


def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return


def register_Ns3DeviceEnergyModelContainer_methods(root_module, cls):
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'arg0')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer() [constructor]
    cls.add_constructor([])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::Ptr<ns3::DeviceEnergyModel> model) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(std::string modelName) [constructor]
    cls.add_constructor([param('std::string', 'modelName')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & a, ns3::DeviceEnergyModelContainer const & b) [constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'a'), param('ns3::DeviceEnergyModelContainer const &', 'b')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::DeviceEnergyModelContainer container) [member function]
    cls.add_method('Add', 'void', [param('ns3::DeviceEnergyModelContainer', 'container')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::Ptr<ns3::DeviceEnergyModel> model) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(std::string modelName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'modelName')])
    ## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >', [], is_const=True)
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >', [], is_const=True)
    ## device-energy-model-container.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::DeviceEnergyModel >', [param('uint32_t', 'i')], is_const=True)
    ## device-energy-model-container.h (module 'energy'): uint32_t ns3::DeviceEnergyModelContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return


def register_Ns3DeviceEnergyModelHelper_methods(root_module, cls):
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper() [constructor]
    cls.add_constructor([])
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper(ns3::DeviceEnergyModelHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelHelper const &', 'arg0')])
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
    cls.add_method('Install', 'ns3::DeviceEnergyModelContainer', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')], is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::NetDeviceContainer deviceContainer, ns3::EnergySourceContainer sourceContainer) const [member function]
    cls.add_method('Install', 'ns3::DeviceEnergyModelContainer', [param('ns3::NetDeviceContainer', 'deviceContainer'), param('ns3::EnergySourceContainer', 'sourceContainer')], is_const=True)
    ## energy-model-helper.h (module 'energy'): void ns3::DeviceEnergyModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_pure_virtual=True, is_virtual=True)
    ## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelHelper::DoInstall(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::DeviceEnergyModel >', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
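
# NOTE (editorial, not generated by PyBindGen): the two Install overloads above
# are the usual entry points for wiring energy models to devices. A hedged
# end-to-end sketch; 'nodes' and 'devices' are assumed to be a NodeContainer
# and a NetDeviceContainer obtained from the usual ns-3 helpers:
#
#   import ns.energy
#   source_helper = ns.energy.BasicEnergySourceHelper()
#   sources = source_helper.Install(nodes)
#   radio_helper = ns.energy.WifiRadioEnergyModelHelper()
#   models = radio_helper.Install(devices, sources)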

def register_Ns3EnergySourceHelper_methods(root_module, cls):
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper() [constructor]
    cls.add_constructor([])
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper(ns3::EnergySourceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySourceHelper const &', 'arg0')])
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 'ns3::EnergySourceContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install', 'ns3::EnergySourceContainer', [param('ns3::NodeContainer', 'c')], is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 'ns3::EnergySourceContainer', [param('std::string', 'nodeName')], is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::InstallAll() const [member function]
    cls.add_method('InstallAll', 'ns3::EnergySourceContainer', [], is_const=True)
    ## energy-model-helper.h (module 'energy'): void ns3::EnergySourceHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_pure_virtual=True, is_virtual=True)
    ## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceHelper::DoInstall(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::EnergySource >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return


def register_Ns3EventId_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
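
# NOTE (editorial, not generated by PyBindGen): EventId is the handle the
# simulator returns from Schedule(). Hedged sketch; Simulator and Seconds live
# in the ns.core module rather than in these energy bindings, and passing a
# plain Python callable relies on the ns-3 binding's Schedule wrapper:
#
#   import ns.core
#   eid = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), lambda: None)
#   if not eid.IsExpired():
#       eid.Cancel()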

def register_Ns3Hasher_methods(root_module, cls):
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
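
# Illustrative usage sketch (not generated): computing 32- and 64-bit hashes with
# the Hasher bound above; clear() returns the Hasher itself so calls can chain.
# Assumes ns.core is importable. Never called at import.
def _example_hasher():
    import ns.core
    h = ns.core.Hasher()
    h32 = h.GetHash32("energy")          # std::string overload bound above
    h64 = h.clear().GetHash64("energy")  # reset state before reuse
    return h32, h64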

def register_Ns3Ipv4Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return

def register_Ns3Ipv4Mask_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return
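
# Illustrative usage sketch (not generated): combining the Ipv4Address and
# Ipv4Mask bindings above to derive the network and directed-broadcast
# addresses of a subnet. Assumes ns.network is importable. Never called at
# import.
def _example_ipv4():
    import ns.network
    addr = ns.network.Ipv4Address("10.1.1.7")
    mask = ns.network.Ipv4Mask("255.255.255.0")
    network = addr.CombineMask(mask)                # expected 10.1.1.0
    bcast = addr.GetSubnetDirectedBroadcast(mask)   # expected 10.1.1.255
    return network, bcast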

def register_Ns3Ipv6Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
    cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return
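
# Illustrative usage sketch (not generated): the static Ipv6Address helpers bound
# above, deriving an autoconfigured link-local address from a MAC address.
# Assumes ns.network is importable. Never called at import.
def _example_ipv6_autoconf():
    import ns.network
    mac = ns.network.Mac48Address("00:00:00:00:00:01")
    ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    assert ll.IsLinkLocal()
    return ll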

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3NetDeviceContainer_methods(root_module, cls):
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3NodeContainer_methods(root_module, cls):
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
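
# Illustrative usage sketch (not generated): creating nodes and indexing them via
# the NodeContainer bindings above. Assumes ns.network is importable. Never
# called at import.
def _example_node_container():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)
    for i in range(nodes.GetN()):
        node = nodes.Get(i)  # ns3::Ptr<ns3::Node>
    # Containers can also be concatenated: NodeContainer(a, b) or a.Add(b).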

def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
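
# Illustrative usage sketch (not generated): ObjectFactory, bound above, builds
# objects by TypeId name with attribute overrides; this mirrors how the energy
# helpers create their models internally. The TypeId and attribute names are
# assumptions based on ns3::BasicEnergySource and may differ by version. Never
# called at import.
def _example_object_factory():
    import ns.core
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::BasicEnergySource")
    factory.Set("BasicEnergySourceInitialEnergyJ", ns.core.DoubleValue(10.0))
    obj = factory.Create()  # ns3::Ptr<ns3::Object>
    return obj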

def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3RvBatteryModelHelper_methods(root_module, cls):
    ## rv-battery-model-helper.h (module 'energy'): ns3::RvBatteryModelHelper::RvBatteryModelHelper(ns3::RvBatteryModelHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RvBatteryModelHelper const &', 'arg0')])
    ## rv-battery-model-helper.h (module 'energy'): ns3::RvBatteryModelHelper::RvBatteryModelHelper() [constructor]
    cls.add_constructor([])
    ## rv-battery-model-helper.h (module 'energy'): void ns3::RvBatteryModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    ## rv-battery-model-helper.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::RvBatteryModelHelper::DoInstall(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::EnergySource >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Tag_methods(root_module, cls):
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
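
# Illustrative usage sketch (not generated): the RvBatteryModelHelper bound above
# follows the EnergySourceHelper pattern registered earlier in this file:
# Set() attributes, then Install() on nodes to obtain an EnergySourceContainer.
# Assumes ns.energy / ns.network are importable. Never called at import.
def _example_rv_battery_install():
    import ns.energy
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(1)
    helper = ns.energy.RvBatteryModelHelper()
    # Install(NodeContainer) is inherited from EnergySourceHelper; internally it
    # calls the private DoInstall() bound above once per node.
    sources = helper.Install(nodes)
    return sources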

def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3TracedValue__Double_methods(root_module, cls):
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [copy constructor]
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(double const & v) [constructor]
    cls.add_constructor([param('double const &', 'v')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): double ns3::TracedValue<double>::Get() const [member function]
    cls.add_method('Get', 'double', [], is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Set(double const & v) [member function]
    cls.add_method('Set', 'void', [param('double const &', 'v')])
    return

def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return

def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return

def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
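
# Illustrative usage sketch (not generated): run-time type introspection through
# the TypeId bindings above, listing a registered type's attributes. The TypeId
# name is an assumption based on the RV battery model this module wraps. Never
# called at import.
def _example_typeid_introspection():
    import ns.core
    tid = ns.core.TypeId.LookupByName("ns3::RvBatteryModel")
    for i in range(tid.GetAttributeN()):
        info = tid.GetAttribute(i)  # TypeId::AttributeInformation, bound above
        print(tid.GetAttributeFullName(i), info.help)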
def register_Ns3WifiMode_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## wifi-mode.h (module 'wifi'): ns3::WifiMode::WifiMode(ns3::WifiMode const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiMode const &', 'arg0')])
    ## wifi-mode.h (module 'wifi'): ns3::WifiMode::WifiMode() [constructor]
    cls.add_constructor([])
    ## wifi-mode.h (module 'wifi'): ns3::WifiMode::WifiMode(std::string name) [constructor]
    cls.add_constructor([param('std::string', 'name')])
    ## wifi-mode.h (module 'wifi'): uint32_t ns3::WifiMode::GetBandwidth() const [member function]
    cls.add_method('GetBandwidth', 'uint32_t', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): ns3::WifiCodeRate ns3::WifiMode::GetCodeRate() const [member function]
    cls.add_method('GetCodeRate', 'ns3::WifiCodeRate', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): uint8_t ns3::WifiMode::GetConstellationSize() const [member function]
    cls.add_method('GetConstellationSize', 'uint8_t', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): uint64_t ns3::WifiMode::GetDataRate() const [member function]
    cls.add_method('GetDataRate', 'uint64_t', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): ns3::WifiModulationClass ns3::WifiMode::GetModulationClass() const [member function]
    cls.add_method('GetModulationClass', 'ns3::WifiModulationClass', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): uint64_t ns3::WifiMode::GetPhyRate() const [member function]
    cls.add_method('GetPhyRate', 'uint64_t', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): uint32_t ns3::WifiMode::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): std::string ns3::WifiMode::GetUniqueName() const [member function]
    cls.add_method('GetUniqueName', 'std::string', [], is_const=True)
    ## wifi-mode.h (module 'wifi'): bool ns3::WifiMode::IsMandatory() const [member function]
    cls.add_method('IsMandatory', 'bool', [], is_const=True)
    return

def register_Ns3WifiModeFactory_methods(root_module, cls):
    ## wifi-mode.h (module 'wifi'): ns3::WifiModeFactory::WifiModeFactory(ns3::WifiModeFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiModeFactory const &', 'arg0')])
    ## wifi-mode.h (module 'wifi'): static ns3::WifiMode ns3::WifiModeFactory::CreateWifiMode(std::string uniqueName, ns3::WifiModulationClass modClass, bool isMandatory, uint32_t bandwidth, uint32_t dataRate, ns3::WifiCodeRate codingRate, uint8_t constellationSize) [member function]
    cls.add_method('CreateWifiMode', 'ns3::WifiMode', [param('std::string', 'uniqueName'), param('ns3::WifiModulationClass', 'modClass'), param('bool', 'isMandatory'), param('uint32_t', 'bandwidth'), param('uint32_t', 'dataRate'), param('ns3::WifiCodeRate', 'codingRate'), param('uint8_t', 'constellationSize')], is_static=True)
    return
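## ---------------------------------------------------------------------------
## Illustrative sketch, not generated output: once these bindings are compiled,
## the WifiMode wrapper registered above becomes reachable from Python
## scripts.  The import path ns.wifi and the mode name "OfdmRate6Mbps" are
## assumptions for illustration only.
def _example_query_wifi_mode():
    import ns.wifi  # assumed import path of the built bindings
    # The std::string constructor registered above resolves a mode by name.
    mode = ns.wifi.WifiMode("OfdmRate6Mbps")
    # GetDataRate()/GetBandwidth() come from the add_method calls above.
    return mode.GetDataRate(), mode.GetBandwidth()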
def register_Ns3WifiPhyListener_methods(root_module, cls):
    ## wifi-phy.h (module 'wifi'): ns3::WifiPhyListener::WifiPhyListener() [constructor]
    cls.add_constructor([])
    ## wifi-phy.h (module 'wifi'): ns3::WifiPhyListener::WifiPhyListener(ns3::WifiPhyListener const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiPhyListener const &', 'arg0')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifyMaybeCcaBusyStart(ns3::Time duration) [member function]
    cls.add_method('NotifyMaybeCcaBusyStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifyRxEndError() [member function]
    cls.add_method('NotifyRxEndError', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifyRxEndOk() [member function]
    cls.add_method('NotifyRxEndOk', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifyRxStart(ns3::Time duration) [member function]
    cls.add_method('NotifyRxStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifySwitchingStart(ns3::Time duration) [member function]
    cls.add_method('NotifySwitchingStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhyListener::NotifyTxStart(ns3::Time duration) [member function]
    cls.add_method('NotifyTxStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3WifiRadioEnergyModelHelper_methods(root_module, cls):
    ## wifi-radio-energy-model-helper.h (module 'energy'): ns3::WifiRadioEnergyModelHelper::WifiRadioEnergyModelHelper(ns3::WifiRadioEnergyModelHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiRadioEnergyModelHelper const &', 'arg0')])
    ## wifi-radio-energy-model-helper.h (module 'energy'): ns3::WifiRadioEnergyModelHelper::WifiRadioEnergyModelHelper() [constructor]
    cls.add_constructor([])
    ## wifi-radio-energy-model-helper.h (module 'energy'): void ns3::WifiRadioEnergyModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    ## wifi-radio-energy-model-helper.h (module 'energy'): void ns3::WifiRadioEnergyModelHelper::SetDepletionCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetDepletionCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## wifi-radio-energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::WifiRadioEnergyModelHelper::DoInstall(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::DeviceEnergyModel >', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3WifiRadioEnergyModelPhyListener_methods(root_module, cls):
    ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModelPhyListener::WifiRadioEnergyModelPhyListener(ns3::WifiRadioEnergyModelPhyListener const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiRadioEnergyModelPhyListener const &', 'arg0')])
    ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModelPhyListener::WifiRadioEnergyModelPhyListener() [constructor]
    cls.add_constructor([])
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifyMaybeCcaBusyStart(ns3::Time duration) [member function]
    cls.add_method('NotifyMaybeCcaBusyStart', 'void', [param('ns3::Time', 'duration')], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifyRxEndError() [member function]
    cls.add_method('NotifyRxEndError', 'void', [], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifyRxEndOk() [member function]
    cls.add_method('NotifyRxEndOk', 'void', [], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifyRxStart(ns3::Time duration) [member function]
    cls.add_method('NotifyRxStart', 'void', [param('ns3::Time', 'duration')], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifySwitchingStart(ns3::Time duration) [member function]
    cls.add_method('NotifySwitchingStart', 'void', [param('ns3::Time', 'duration')], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::NotifyTxStart(ns3::Time duration) [member function]
    cls.add_method('NotifyTxStart', 'void', [param('ns3::Time', 'duration')], is_virtual=True)
    ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModelPhyListener::SetChangeStateCallback(ns3::Callback<void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetChangeStateCallback', 'void', [param('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    return

def register_Ns3WifiTxVector_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## wifi-tx-vector.h (module 'wifi'): ns3::WifiTxVector::WifiTxVector(ns3::WifiTxVector const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WifiTxVector const &', 'arg0')])
    ## wifi-tx-vector.h (module 'wifi'): ns3::WifiTxVector::WifiTxVector() [constructor]
    cls.add_constructor([])
    ## wifi-tx-vector.h (module 'wifi'): ns3::WifiTxVector::WifiTxVector(ns3::WifiMode mode, uint8_t powerLevel, uint8_t retries, bool shortGuardInterval, uint8_t nss, uint8_t ness, bool stbc) [constructor]
    cls.add_constructor([param('ns3::WifiMode', 'mode'), param('uint8_t', 'powerLevel'), param('uint8_t', 'retries'), param('bool', 'shortGuardInterval'), param('uint8_t', 'nss'), param('uint8_t', 'ness'), param('bool', 'stbc')])
    ## wifi-tx-vector.h (module 'wifi'): ns3::WifiMode ns3::WifiTxVector::GetMode() const [member function]
    cls.add_method('GetMode', 'ns3::WifiMode', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): uint8_t ns3::WifiTxVector::GetNess() const [member function]
    cls.add_method('GetNess', 'uint8_t', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): uint8_t ns3::WifiTxVector::GetNss() const [member function]
    cls.add_method('GetNss', 'uint8_t', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): uint8_t ns3::WifiTxVector::GetRetries() const [member function]
    cls.add_method('GetRetries', 'uint8_t', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): uint8_t ns3::WifiTxVector::GetTxPowerLevel() const [member function]
    cls.add_method('GetTxPowerLevel', 'uint8_t', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): bool ns3::WifiTxVector::IsShortGuardInterval() const [member function]
    cls.add_method('IsShortGuardInterval', 'bool', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): bool ns3::WifiTxVector::IsStbc() const [member function]
    cls.add_method('IsStbc', 'bool', [], is_const=True)
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetMode(ns3::WifiMode mode) [member function]
    cls.add_method('SetMode', 'void', [param('ns3::WifiMode', 'mode')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetNess(uint8_t ness) [member function]
    cls.add_method('SetNess', 'void', [param('uint8_t', 'ness')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetNss(uint8_t nss) [member function]
    cls.add_method('SetNss', 'void', [param('uint8_t', 'nss')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetRetries(uint8_t retries) [member function]
    cls.add_method('SetRetries', 'void', [param('uint8_t', 'retries')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetShortGuardInterval(bool guardinterval) [member function]
    cls.add_method('SetShortGuardInterval', 'void', [param('bool', 'guardinterval')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetStbc(bool stbc) [member function]
    cls.add_method('SetStbc', 'void', [param('bool', 'stbc')])
    ## wifi-tx-vector.h (module 'wifi'): void ns3::WifiTxVector::SetTxPowerLevel(uint8_t powerlevel) [member function]
    cls.add_method('SetTxPowerLevel', 'void', [param('uint8_t', 'powerlevel')])
    return

def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
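## ---------------------------------------------------------------------------
## Illustrative sketch, not generated output: the int64x64_t and Time helpers
## below lean heavily on pybindgen's operator hooks.  The hypothetical helper
## here shows the same pattern in isolation; the class name 'my::Fixed' exists
## only for this example.
def _example_register_value_class_operators(root_module, cls):
    # Arithmetic operators return a new wrapper instance of the same type.
    cls.add_binary_numeric_operator('+', root_module['my::Fixed'], root_module['my::Fixed'], param('my::Fixed const &', 'right'))
    # Comparison operators map onto Python's rich comparisons.
    cls.add_binary_comparison_operator('<')
    # In-place operators ('+=' and friends) mutate the left operand.
    cls.add_inplace_numeric_operator('+=', param('my::Fixed const &', 'right'))
    # And this makes str()/print use the C++ operator<<.
    cls.add_output_stream_operator()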
def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return

def register_Ns3BasicEnergySourceHelper_methods(root_module, cls):
    ## basic-energy-source-helper.h (module 'energy'): ns3::BasicEnergySourceHelper::BasicEnergySourceHelper(ns3::BasicEnergySourceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BasicEnergySourceHelper const &', 'arg0')])
    ## basic-energy-source-helper.h (module 'energy'): ns3::BasicEnergySourceHelper::BasicEnergySourceHelper() [constructor]
    cls.add_constructor([])
    ## basic-energy-source-helper.h (module 'energy'): void ns3::BasicEnergySourceHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    ## basic-energy-source-helper.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::BasicEnergySourceHelper::DoInstall(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::EnergySource >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
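## ---------------------------------------------------------------------------
## Illustrative note, not generated output: each register_Ns3SimpleRefCount__*
## helper below binds one instantiation of the ns3::SimpleRefCount<T, PARENT,
## DELETER> template, and all of them follow the same three-call shape
## (default ctor, copy ctor, static Cleanup).  A hypothetical data-driven
## equivalent is sketched here; the real generated code spells each one out.
def _example_register_simple_ref_count(cls, cpp_name):
    # cpp_name would be e.g.
    # 'ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > >'
    cls.add_constructor([])
    cls.add_constructor([param(cpp_name + ' const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)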
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays', 'double', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears', 'double', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    return
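## ---------------------------------------------------------------------------
## Illustrative sketch, not generated output: the ns3::Time wrapper registered
## above exposes a std::string constructor and unit-based accessors.  The
## import path ns.core and the literal "5ms" are assumptions for illustration
## only.
def _example_time_round_trip():
    import ns.core  # assumed import path of the built bindings
    # The std::string constructor registered above parses unit-suffixed values.
    t = ns.core.Time("5ms")
    # GetSeconds()/GetMilliSeconds() come from the add_method calls above.
    return t.GetSeconds(), t.GetMilliSeconds()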
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3TracedValue__Ns3Time_methods(root_module, cls):
    ## traced-value.h (module 'core'): ns3::TracedValue<ns3::Time>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<ns3::Time>::TracedValue(ns3::TracedValue<ns3::Time> const & o) [copy constructor]
    cls.add_constructor([param('ns3::TracedValue< ns3::Time > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<ns3::Time>::TracedValue(ns3::Time const & v) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'v')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<ns3::Time>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<ns3::Time>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<ns3::Time>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<ns3::Time>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): ns3::Time ns3::TracedValue<ns3::Time>::Get() const [member function]
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<ns3::Time>::Set(ns3::Time const & v) [member function]
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'v')])
    return

def register_Ns3Trailer_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
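## ---------------------------------------------------------------------------
## Illustrative sketch, not generated output: the TracedValue<ns3::Time>
## wrapper registered above pairs Connect/Disconnect methods with Get/Set
## accessors, and registrations for other payload types repeat the same calls
## with the type substituted.  The instantiation 'ns3::TracedValue< double >'
## below is made up for this example.
def _example_register_traced_value_double(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('double const &', 'v')])
    cls.add_method('Get', 'double', [], is_const=True)
    cls.add_method('Set', 'void', [param('double const &', 'v')])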
is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate18Mbps() [member function] cls.add_method('GetErpOfdmRate18Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate24Mbps() [member function] cls.add_method('GetErpOfdmRate24Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate36Mbps() [member function] cls.add_method('GetErpOfdmRate36Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate48Mbps() [member function] cls.add_method('GetErpOfdmRate48Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate54Mbps() [member function] cls.add_method('GetErpOfdmRate54Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate6Mbps() [member function] cls.add_method('GetErpOfdmRate6Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetErpOfdmRate9Mbps() [member function] cls.add_method('GetErpOfdmRate9Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetFrequency() const [member function] cls.add_method('GetFrequency', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::GetGreenfield() const [member function] cls.add_method('GetGreenfield', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::GetGuardInterval() const [member function] cls.add_method('GetGuardInterval', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): ns3::Time ns3::WifiPhy::GetLastRxStartTime() const [member function] cls.add_method('GetLastRxStartTime', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::GetLdpc() const [member function] cls.add_method('GetLdpc', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetMFPlcpHeaderMode(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble) [member function] cls.add_method('GetMFPlcpHeaderMode', 'ns3::WifiMode', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble')], is_static=True) ## wifi-phy.h (module 'wifi'): uint8_t ns3::WifiPhy::GetMcs(uint8_t mcs) const [member function] cls.add_method('GetMcs', 'uint8_t', [param('uint8_t', 'mcs')], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): ns3::WifiModeList ns3::WifiPhy::GetMembershipSelectorModes(uint32_t selector) [member function] cls.add_method('GetMembershipSelectorModes', 'ns3::WifiModeList', [param('uint32_t', 'selector')], is_pure_virtual=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): ns3::WifiMode ns3::WifiPhy::GetMode(uint32_t mode) const [member function] cls.add_method('GetMode', 'ns3::WifiMode', [param('uint32_t', 'mode')], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetNBssMembershipSelectors() const [member function] cls.add_method('GetNBssMembershipSelectors', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint8_t 
ns3::WifiPhy::GetNMcs() const [member function] cls.add_method('GetNMcs', 'uint8_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetNModes() const [member function] cls.add_method('GetNModes', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetNTxPower() const [member function] cls.add_method('GetNTxPower', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetNumberOfReceiveAntennas() const [member function] cls.add_method('GetNumberOfReceiveAntennas', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::GetNumberOfTransmitAntennas() const [member function] cls.add_method('GetNumberOfTransmitAntennas', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate108MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate108MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate120MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate120MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate121_5MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate121_5MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate12Mbps() [member function] cls.add_method('GetOfdmRate12Mbps', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate12MbpsBW10MHz() [member function] cls.add_method('GetOfdmRate12MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate12MbpsBW5MHz() [member function] cls.add_method('GetOfdmRate12MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate135MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate135MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate135MbpsBW40MHzShGi() [member function] cls.add_method('GetOfdmRate135MbpsBW40MHzShGi', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate13MbpsBW20MHz() [member function] cls.add_method('GetOfdmRate13MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate13_5MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate13_5MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate13_5MbpsBW5MHz() [member function] cls.add_method('GetOfdmRate13_5MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate14_4MbpsBW20MHz() [member function] cls.add_method('GetOfdmRate14_4MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate150MbpsBW40MHz() [member function] cls.add_method('GetOfdmRate150MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True) ## wifi-phy.h (module 'wifi'): static ns3::WifiMode 
    ## ns3::WifiPhy::GetOfdmRate15MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate15MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate18Mbps() [member function]
    cls.add_method('GetOfdmRate18Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate18MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate18MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate19_5MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate19_5MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate1_5MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate1_5MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate21_7MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate21_7MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate24Mbps() [member function]
    cls.add_method('GetOfdmRate24Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate24MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate24MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate26MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate26MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate27MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate27MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate27MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate27MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate28_9MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate28_9MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate2_25MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate2_25MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate30MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate30MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate36Mbps() [member function]
    cls.add_method('GetOfdmRate36Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate39MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate39MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate3MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate3MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate3MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate3MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate40_5MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate40_5MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate43_3MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate43_3MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate45MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate45MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate48Mbps() [member function]
    cls.add_method('GetOfdmRate48Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate4_5MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate4_5MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate4_5MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate4_5MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate52MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate52MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate54Mbps() [member function]
    cls.add_method('GetOfdmRate54Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate54MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate54MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate57_8MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate57_8MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate58_5MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate58_5MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate60MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate60MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate65MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate65MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate65MbpsBW20MHzShGi() [member function]
    cls.add_method('GetOfdmRate65MbpsBW20MHzShGi', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate6Mbps() [member function]
    cls.add_method('GetOfdmRate6Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate6MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate6MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate6MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate6MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate6_5MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate6_5MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate72_2MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate72_2MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate7_2MbpsBW20MHz() [member function]
    cls.add_method('GetOfdmRate7_2MbpsBW20MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate81MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate81MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate90MbpsBW40MHz() [member function]
    cls.add_method('GetOfdmRate90MbpsBW40MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate9Mbps() [member function]
    cls.add_method('GetOfdmRate9Mbps', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate9MbpsBW10MHz() [member function]
    cls.add_method('GetOfdmRate9MbpsBW10MHz', 'ns3::WifiMode', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetOfdmRate9MbpsBW5MHz() [member function]
    cls.add_method('GetOfdmRate9MbpsBW5MHz', 'ns3::WifiMode', [], is_static=True)
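    ## Note (illustrative comment, not part of the generated output): the
    ## static GetOfdmRate* getters above enumerate one ns3::WifiMode per
    ## rate/channel-width combination (5, 10, 20 and 40 MHz variants). A
    ## hedged sketch of calling one from Python, assuming the usual
    ## 'ns.wifi' module layout these bindings produce:
    ##
    ##   import ns.wifi
    ##   mode = ns.wifi.WifiPhy.GetOfdmRate54Mbps()  # static: no instance needed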
    ## wifi-phy.h (module 'wifi'): static double ns3::WifiPhy::GetPayloadDurationMicroSeconds(uint32_t size, ns3::WifiTxVector txvector) [member function]
    cls.add_method('GetPayloadDurationMicroSeconds', 'double', [param('uint32_t', 'size'), param('ns3::WifiTxVector', 'txvector')], is_static=True)
    ## wifi-phy.h (module 'wifi'): static uint32_t ns3::WifiPhy::GetPlcpHeaderDurationMicroSeconds(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble) [member function]
    cls.add_method('GetPlcpHeaderDurationMicroSeconds', 'uint32_t', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble')], is_static=True)
    ## wifi-phy.h (module 'wifi'): static ns3::WifiMode ns3::WifiPhy::GetPlcpHeaderMode(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble) [member function]
    cls.add_method('GetPlcpHeaderMode', 'ns3::WifiMode', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble')], is_static=True)
    ## wifi-phy.h (module 'wifi'): static uint32_t ns3::WifiPhy::GetPlcpHtSigHeaderDurationMicroSeconds(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble) [member function]
    cls.add_method('GetPlcpHtSigHeaderDurationMicroSeconds', 'uint32_t', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble')], is_static=True)
    ## wifi-phy.h (module 'wifi'): static uint32_t ns3::WifiPhy::GetPlcpHtTrainingSymbolDurationMicroSeconds(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble, ns3::WifiTxVector txvector) [member function]
    cls.add_method('GetPlcpHtTrainingSymbolDurationMicroSeconds', 'uint32_t', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble'), param('ns3::WifiTxVector', 'txvector')], is_static=True)
    ## wifi-phy.h (module 'wifi'): static uint32_t ns3::WifiPhy::GetPlcpPreambleDurationMicroSeconds(ns3::WifiMode payloadMode, ns3::WifiPreamble preamble) [member function]
    cls.add_method('GetPlcpPreambleDurationMicroSeconds', 'uint32_t', [param('ns3::WifiMode', 'payloadMode'), param('ns3::WifiPreamble', 'preamble')], is_static=True)
    ## wifi-phy.h (module 'wifi'): ns3::Time ns3::WifiPhy::GetStateDuration() [member function]
    cls.add_method('GetStateDuration', 'ns3::Time', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::GetStbc() const [member function]
    cls.add_method('GetStbc', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): double ns3::WifiPhy::GetTxPowerEnd() const [member function]
    cls.add_method('GetTxPowerEnd', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): double ns3::WifiPhy::GetTxPowerStart() const [member function]
    cls.add_method('GetTxPowerStart', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): static ns3::TypeId ns3::WifiPhy::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsModeSupported(ns3::WifiMode mode) const [member function]
    cls.add_method('IsModeSupported', 'bool', [param('ns3::WifiMode', 'mode')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateBusy() [member function]
    cls.add_method('IsStateBusy', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateCcaBusy() [member function]
    cls.add_method('IsStateCcaBusy', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateIdle() [member function]
    cls.add_method('IsStateIdle', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateRx() [member function]
    cls.add_method('IsStateRx', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateSwitching() [member function]
    cls.add_method('IsStateSwitching', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): bool ns3::WifiPhy::IsStateTx() [member function]
    cls.add_method('IsStateTx', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): ns3::WifiMode ns3::WifiPhy::McsToWifiMode(uint8_t mcs) [member function]
    cls.add_method('McsToWifiMode', 'ns3::WifiMode', [param('uint8_t', 'mcs')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyMonitorSniffRx(ns3::Ptr<const ns3::Packet> packet, uint16_t channelFreqMhz, uint16_t channelNumber, uint32_t rate, bool isShortPreamble, double signalDbm, double noiseDbm) [member function]
    cls.add_method('NotifyMonitorSniffRx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'channelFreqMhz'), param('uint16_t', 'channelNumber'), param('uint32_t', 'rate'), param('bool', 'isShortPreamble'), param('double', 'signalDbm'), param('double', 'noiseDbm')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyMonitorSniffTx(ns3::Ptr<const ns3::Packet> packet, uint16_t channelFreqMhz, uint16_t channelNumber, uint32_t rate, bool isShortPreamble, uint8_t txPower) [member function]
    cls.add_method('NotifyMonitorSniffTx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'channelFreqMhz'), param('uint16_t', 'channelNumber'), param('uint32_t', 'rate'), param('bool', 'isShortPreamble'), param('uint8_t', 'txPower')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyRxBegin(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyRxBegin', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyRxDrop(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyRxDrop', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyRxEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyRxEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyTxBegin(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyTxBegin', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyTxDrop(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyTxDrop', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::NotifyTxEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('NotifyTxEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::RegisterListener(ns3::WifiPhyListener * listener) [member function]
    cls.add_method('RegisterListener', 'void', [param('ns3::WifiPhyListener *', 'listener')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SendPacket(ns3::Ptr<const ns3::Packet> packet, ns3::WifiMode mode, ns3::WifiPreamble preamble, ns3::WifiTxVector txvector) [member function]
    cls.add_method('SendPacket', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::WifiMode', 'mode'), param('ns3::WifiPreamble', 'preamble'), param('ns3::WifiTxVector', 'txvector')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetChannelBonding(bool channelbonding) [member function]
    cls.add_method('SetChannelBonding', 'void', [param('bool', 'channelbonding')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetChannelNumber(uint16_t id) [member function]
    cls.add_method('SetChannelNumber', 'void', [param('uint16_t', 'id')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetFrequency(uint32_t freq) [member function]
    cls.add_method('SetFrequency', 'void', [param('uint32_t', 'freq')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetGreenfield(bool greenfield) [member function]
    cls.add_method('SetGreenfield', 'void', [param('bool', 'greenfield')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetGuardInterval(bool guardInterval) [member function]
    cls.add_method('SetGuardInterval', 'void', [param('bool', 'guardInterval')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetLdpc(bool ldpc) [member function]
    cls.add_method('SetLdpc', 'void', [param('bool', 'ldpc')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetNumberOfReceiveAntennas(uint32_t rx) [member function]
    cls.add_method('SetNumberOfReceiveAntennas', 'void', [param('uint32_t', 'rx')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetNumberOfTransmitAntennas(uint32_t tx) [member function]
    cls.add_method('SetNumberOfTransmitAntennas', 'void', [param('uint32_t', 'tx')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetReceiveErrorCallback(ns3::Callback<void,ns3::Ptr<const ns3::Packet>,double,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('SetReceiveErrorCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetReceiveOkCallback(ns3::Callback<void,ns3::Ptr<ns3::Packet>,double,ns3::WifiMode,ns3::WifiPreamble,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('SetReceiveOkCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::WifiMode, ns3::WifiPreamble, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): void ns3::WifiPhy::SetStbc(bool stbc) [member function]
    cls.add_method('SetStbc', 'void', [param('bool', 'stbc')], is_pure_virtual=True, is_virtual=True)
    ## wifi-phy.h (module 'wifi'): uint32_t ns3::WifiPhy::WifiModeToMcs(ns3::WifiMode mode) [member function]
    cls.add_method('WifiModeToMcs', 'uint32_t', [param('ns3::WifiMode', 'mode')], is_pure_virtual=True, is_virtual=True)
    return
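## Note (illustrative comment, not part of the generated output): each
## register_* function below receives a pybindgen CppClass as 'cls' and
## attaches the constructors/methods declared in the corresponding ns-3
## header. They are invoked by the module's registration driver; a hedged
## sketch of the usual pybindgen-generated layout (driver name and lookup
## syntax are assumptions):
##
##   def register_methods(root_module):
##       register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
##       register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
##       ...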
def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3BooleanChecker_methods(root_module, cls):
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return

def register_Ns3BooleanValue_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
    cls.add_constructor([param('bool', 'value')])
    ## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
    cls.add_method('Get', 'bool', [], is_const=True)
    ## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
    cls.add_method('Set', 'void', [param('bool', 'value')])
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3DeviceEnergyModel_methods(root_module, cls):
    ## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel(ns3::DeviceEnergyModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModel const &', 'arg0')])
    ## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel() [constructor]
    cls.add_constructor([])
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::ChangeState(int newState) [member function]
    cls.add_method('ChangeState', 'void', [param('int', 'newState')], is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetCurrentA() const [member function]
    cls.add_method('GetCurrentA', 'double', [], is_const=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetTotalEnergyConsumption() const [member function]
    cls.add_method('GetTotalEnergyConsumption', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): static ns3::TypeId ns3::DeviceEnergyModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::HandleEnergyDepletion() [member function]
    cls.add_method('HandleEnergyDepletion', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('SetEnergySource', 'void', [param('ns3::Ptr< ns3::EnergySource >', 'source')], is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::DoGetCurrentA() const [member function]
    cls.add_method('DoGetCurrentA', 'double', [], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3DoubleValue_methods(root_module, cls):
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
    cls.add_constructor([])
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
    cls.add_constructor([param('double const &', 'value')])
    ## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
    cls.add_method('Get', 'double', [], is_const=True)
    ## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
    cls.add_method('Set', 'void', [param('double const &', 'value')])
    return
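## Note (illustrative comment, not part of the generated output): the
## *Value/*Checker pairs registered here mirror ns-3's attribute system.
## A hedged sketch of how the BooleanValue binding above behaves from
## Python ('ns.core' module name and the checker object are assumptions):
##
##   import ns.core
##   v = ns.core.BooleanValue(True)
##   v.Get()        # -> True
##   v.Set(False)
##   v.SerializeToString(checker)  # 'checker' would come from the matching
##                                 # BooleanChecker instance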
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3EnergySource_methods(root_module, cls):
    ## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource(ns3::EnergySource const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySource const &', 'arg0')])
    ## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource() [constructor]
    cls.add_constructor([])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::AppendDeviceEnergyModel(ns3::Ptr<ns3::DeviceEnergyModel> deviceEnergyModelPtr) [member function]
    cls.add_method('AppendDeviceEnergyModel', 'void', [param('ns3::Ptr< ns3::DeviceEnergyModel >', 'deviceEnergyModelPtr')])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::DisposeDeviceModels() [member function]
    cls.add_method('DisposeDeviceModels', 'void', [])
    ## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(ns3::TypeId tid) [member function]
    cls.add_method('FindDeviceEnergyModels', 'ns3::DeviceEnergyModelContainer', [param('ns3::TypeId', 'tid')])
    ## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(std::string name) [member function]
    cls.add_method('FindDeviceEnergyModels', 'ns3::DeviceEnergyModelContainer', [param('std::string', 'name')])
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetEnergyFraction() [member function]
    cls.add_method('GetEnergyFraction', 'double', [], is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetInitialEnergy() const [member function]
    cls.add_method('GetInitialEnergy', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## energy-source.h (module 'energy'): ns3::Ptr<ns3::Node> ns3::EnergySource::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetRemainingEnergy() [member function]
    cls.add_method('GetRemainingEnergy', 'double', [], is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetSupplyVoltage() const [member function]
    cls.add_method('GetSupplyVoltage', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## energy-source.h (module 'energy'): static ns3::TypeId ns3::EnergySource::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## energy-source.h (module 'energy'): void ns3::EnergySource::InitializeDeviceModels() [member function]
    cls.add_method('InitializeDeviceModels', 'void', [])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::UpdateEnergySource() [member function]
    cls.add_method('UpdateEnergySource', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): void ns3::EnergySource::BreakDeviceEnergyModelRefCycle() [member function]
    cls.add_method('BreakDeviceEnergyModelRefCycle', 'void', [], visibility='protected')
    ## energy-source.h (module 'energy'): double ns3::EnergySource::CalculateTotalCurrent() [member function]
    cls.add_method('CalculateTotalCurrent', 'double', [], visibility='protected')
    ## energy-source.h (module 'energy'): void ns3::EnergySource::NotifyEnergyDrained() [member function]
    cls.add_method('NotifyEnergyDrained', 'void', [], visibility='protected')
    ## energy-source.h (module 'energy'): void ns3::EnergySource::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3EnergySourceContainer_methods(root_module, cls):
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySourceContainer const &', 'arg0')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer() [constructor]
    cls.add_constructor([])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::Ptr<ns3::EnergySource> source) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EnergySource >', 'source')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(std::string sourceName) [constructor]
    cls.add_constructor([param('std::string', 'sourceName')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & a, ns3::EnergySourceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::EnergySourceContainer const &', 'a'), param('ns3::EnergySourceContainer const &', 'b')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::EnergySourceContainer container) [member function]
    cls.add_method('Add', 'void', [param('ns3::EnergySourceContainer', 'container')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::EnergySource >', 'source')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(std::string sourceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'sourceName')])
    ## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >', [], is_const=True)
    ## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >', [], is_const=True)
    ## energy-source-container.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::EnergySource >', [param('uint32_t', 'i')], is_const=True)
    ## energy-source-container.h (module 'energy'): uint32_t ns3::EnergySourceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    ## energy-source-container.h (module 'energy'): static ns3::TypeId ns3::EnergySourceContainer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3EnumChecker_methods(root_module, cls):
    ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
    cls.add_constructor([])
    ## enum.h (module 'core'): void ns3::EnumChecker::Add(int v, std::string name) [member function]
    cls.add_method('Add', 'void', [param('int', 'v'), param('std::string', 'name')])
    ## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int v, std::string name) [member function]
    cls.add_method('AddDefault', 'void', [param('int', 'v'), param('std::string', 'name')])
    ## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True)
    return
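## Note (illustrative comment, not part of the generated output):
## EnergySourceContainer above follows the usual ns-3 container idiom
## (Add/Get/GetN plus C++ iterators). A hedged sketch of iterating it
## from Python ('ns.energy' module name is an assumption):
##
##   import ns.energy
##   sources = ns.energy.EnergySourceContainer()
##   for i in range(sources.GetN()):
##       src = sources.Get(i)            # ns3::Ptr<ns3::EnergySource>
##       print(src.GetRemainingEnergy())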
def register_Ns3EnumValue_methods(root_module, cls):
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
    cls.add_constructor([])
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
    cls.add_method('Get', 'int', [], is_const=True)
    ## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## enum.h (module 'core'): void ns3::EnumValue::Set(int v) [member function]
    cls.add_method('Set', 'void', [param('int', 'v')])
    return

def register_Ns3EventImpl_methods(root_module, cls):
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3IntegerValue_methods(root_module, cls):
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
    cls.add_constructor([])
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
    cls.add_constructor([param('int64_t const &', 'value')])
    ## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
    cls.add_method('Get', 'int64_t', [], is_const=True)
    ## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
    cls.add_method('Set', 'void', [param('int64_t const &', 'value')])
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
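## Note (illustrative comment, not part of the generated output): the
## address types repeat the same Checker/Value pattern. A hedged sketch
## for the Ipv4AddressValue binding registered above ('ns.network' module
## name and the Ipv4Address string constructor are assumptions):
##
##   import ns.network
##   v = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("10.1.1.1"))
##   v.Get()   # -> ns3::Ipv4Address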
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3LiIonEnergySource_methods(root_module, cls):
    ## li-ion-energy-source.h (module 'energy'): ns3::LiIonEnergySource::LiIonEnergySource(ns3::LiIonEnergySource const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LiIonEnergySource const &', 'arg0')])
    ## li-ion-energy-source.h (module 'energy'): ns3::LiIonEnergySource::LiIonEnergySource() [constructor]
    cls.add_constructor([])
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::DecreaseRemainingEnergy(double energyJ) [member function]
    cls.add_method('DecreaseRemainingEnergy', 'void', [param('double', 'energyJ')], is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): double ns3::LiIonEnergySource::GetEnergyFraction() [member function]
    cls.add_method('GetEnergyFraction', 'double', [], is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): ns3::Time ns3::LiIonEnergySource::GetEnergyUpdateInterval() const [member function]
    cls.add_method('GetEnergyUpdateInterval', 'ns3::Time', [], is_const=True)
    ## li-ion-energy-source.h (module 'energy'): double ns3::LiIonEnergySource::GetInitialEnergy() const [member function]
    cls.add_method('GetInitialEnergy', 'double', [], is_const=True, is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): double ns3::LiIonEnergySource::GetRemainingEnergy() [member function]
    cls.add_method('GetRemainingEnergy', 'double', [], is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): double ns3::LiIonEnergySource::GetSupplyVoltage() const [member function]
    cls.add_method('GetSupplyVoltage', 'double', [], is_const=True, is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): static ns3::TypeId ns3::LiIonEnergySource::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::IncreaseRemainingEnergy(double energyJ) [member function]
    cls.add_method('IncreaseRemainingEnergy', 'void', [param('double', 'energyJ')], is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::SetEnergyUpdateInterval(ns3::Time interval) [member function]
    cls.add_method('SetEnergyUpdateInterval', 'void', [param('ns3::Time', 'interval')])
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::SetInitialEnergy(double initialEnergyJ) [member function]
    cls.add_method('SetInitialEnergy', 'void', [param('double', 'initialEnergyJ')])
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::SetInitialSupplyVoltage(double supplyVoltageV) [member function]
    cls.add_method('SetInitialSupplyVoltage', 'void', [param('double', 'supplyVoltageV')])
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::UpdateEnergySource() [member function]
    cls.add_method('UpdateEnergySource', 'void', [], is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    ## li-ion-energy-source.h (module 'energy'): void ns3::LiIonEnergySource::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    return
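## Note (illustrative comment, not part of the generated output):
## LiIonEnergySource is a concrete EnergySource, so its overrides above
## drop is_pure_virtual. A hedged usage sketch of the binding ('ns.energy'
## module name and the numeric value are assumptions):
##
##   import ns.energy
##   src = ns.energy.LiIonEnergySource()
##   src.SetInitialEnergy(31752.0)   # joules; example value only
##   src.GetRemainingEnergy()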
def register_Ns3NetDevice_methods(root_module, cls):
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return

def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return

def register_Ns3Packet_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h 
(module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> 
ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const 
[member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')]) return def register_Ns3RvBatteryModel_methods(root_module, cls): ## rv-battery-model.h (module 'energy'): ns3::RvBatteryModel::RvBatteryModel(ns3::RvBatteryModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::RvBatteryModel const &', 'arg0')]) ## rv-battery-model.h (module 'energy'): ns3::RvBatteryModel::RvBatteryModel() [constructor] cls.add_constructor([]) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetBatteryLevel() [member function] cls.add_method('GetBatteryLevel', 'double', []) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetBeta() const [member function] cls.add_method('GetBeta', 'double', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetCutoffVoltage() const [member function] cls.add_method('GetCutoffVoltage', 'double', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetEnergyFraction() [member function] cls.add_method('GetEnergyFraction', 'double', [], is_virtual=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetInitialEnergy() const [member function] cls.add_method('GetInitialEnergy', 'double', [], is_const=True, is_virtual=True) ## rv-battery-model.h (module 'energy'): ns3::Time ns3::RvBatteryModel::GetLifetime() const [member function] cls.add_method('GetLifetime', 'ns3::Time', [], is_const=True) ## rv-battery-model.h (module 'energy'): int ns3::RvBatteryModel::GetNumOfTerms() const [member function] cls.add_method('GetNumOfTerms', 'int', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetOpenCircuitVoltage() const [member function] cls.add_method('GetOpenCircuitVoltage', 'double', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetRemainingEnergy() [member function] cls.add_method('GetRemainingEnergy', 'double', [], is_virtual=True) ## rv-battery-model.h (module 'energy'): ns3::Time ns3::RvBatteryModel::GetSamplingInterval() const [member function] cls.add_method('GetSamplingInterval', 'ns3::Time', [], is_const=True) ## rv-battery-model.h (module 'energy'): double ns3::RvBatteryModel::GetSupplyVoltage() const [member function] cls.add_method('GetSupplyVoltage', 'double', [], is_const=True, is_virtual=True) ## rv-battery-model.h (module 'energy'): static ns3::TypeId ns3::RvBatteryModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::SetAlpha(double alpha) [member function] cls.add_method('SetAlpha', 'void', [param('double', 'alpha')]) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::SetBeta(double beta) [member function] cls.add_method('SetBeta', 'void', [param('double', 'beta')]) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::SetCutoffVoltage(double voltage) [member function] cls.add_method('SetCutoffVoltage', 'void', [param('double', 'voltage')]) ## rv-battery-model.h (module 'energy'): void 
ns3::RvBatteryModel::SetNumOfTerms(int num) [member function] cls.add_method('SetNumOfTerms', 'void', [param('int', 'num')]) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::SetOpenCircuitVoltage(double voltage) [member function] cls.add_method('SetOpenCircuitVoltage', 'void', [param('double', 'voltage')]) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::SetSamplingInterval(ns3::Time interval) [member function] cls.add_method('SetSamplingInterval', 'void', [param('ns3::Time', 'interval')]) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::UpdateEnergySource() [member function] cls.add_method('UpdateEnergySource', 'void', [], is_virtual=True) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## rv-battery-model.h (module 'energy'): void ns3::RvBatteryModel::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True) return def register_Ns3SimpleDeviceEnergyModel_methods(root_module, cls): ## simple-device-energy-model.h (module 'energy'): ns3::SimpleDeviceEnergyModel::SimpleDeviceEnergyModel(ns3::SimpleDeviceEnergyModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::SimpleDeviceEnergyModel const &', 'arg0')]) ## simple-device-energy-model.h (module 'energy'): ns3::SimpleDeviceEnergyModel::SimpleDeviceEnergyModel() [constructor] cls.add_constructor([]) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::ChangeState(int newState) [member function] cls.add_method('ChangeState', 'void', [param('int', 'newState')], is_virtual=True) ## simple-device-energy-model.h (module 'energy'): ns3::Ptr<ns3::Node> ns3::SimpleDeviceEnergyModel::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## simple-device-energy-model.h (module 'energy'): double ns3::SimpleDeviceEnergyModel::GetTotalEnergyConsumption() const [member function] cls.add_method('GetTotalEnergyConsumption', 'double', [], is_const=True, is_virtual=True) ## simple-device-energy-model.h (module 'energy'): static ns3::TypeId ns3::SimpleDeviceEnergyModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::HandleEnergyDepletion() [member function] cls.add_method('HandleEnergyDepletion', 'void', [], is_virtual=True) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::SetCurrentA(double current) [member function] cls.add_method('SetCurrentA', 'void', [param('double', 'current')]) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function] cls.add_method('SetEnergySource', 'void', [param('ns3::Ptr< ns3::EnergySource >', 'source')], is_virtual=True) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## simple-device-energy-model.h (module 'energy'): void ns3::SimpleDeviceEnergyModel::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## simple-device-energy-model.h (module 'energy'): double 
ns3::SimpleDeviceEnergyModel::DoGetCurrentA() const [member function] cls.add_method('DoGetCurrentA', 'double', [], is_const=True, visibility='private', is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, 
is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3UintegerValue_methods(root_module, cls): ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor] cls.add_constructor([]) ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')]) ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor] cls.add_constructor([param('uint64_t const &', 'value')]) ## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function] cls.add_method('Get', 'uint64_t', [], is_const=True) ## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function] cls.add_method('Set', 'void', [param('uint64_t const &', 'value')]) return def register_Ns3WifiModeChecker_methods(root_module, cls): ## wifi-mode.h (module 'wifi'): ns3::WifiModeChecker::WifiModeChecker() [constructor] cls.add_constructor([]) ## wifi-mode.h (module 'wifi'): ns3::WifiModeChecker::WifiModeChecker(ns3::WifiModeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::WifiModeChecker const &', 'arg0')]) return def register_Ns3WifiModeValue_methods(root_module, cls): ## wifi-mode.h (module 'wifi'): ns3::WifiModeValue::WifiModeValue() [constructor] cls.add_constructor([]) ## wifi-mode.h (module 'wifi'): ns3::WifiModeValue::WifiModeValue(ns3::WifiModeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::WifiModeValue const &', 'arg0')]) ## wifi-mode.h (module 'wifi'): ns3::WifiModeValue::WifiModeValue(ns3::WifiMode const & value) [constructor] cls.add_constructor([param('ns3::WifiMode const &', 'value')]) ## wifi-mode.h (module 'wifi'): ns3::Ptr<ns3::AttributeValue> ns3::WifiModeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## wifi-mode.h (module 'wifi'): bool ns3::WifiModeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## wifi-mode.h (module 'wifi'): ns3::WifiMode ns3::WifiModeValue::Get() const [member function] cls.add_method('Get', 'ns3::WifiMode', [], is_const=True) ## wifi-mode.h (module 'wifi'): std::string ns3::WifiModeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) 
const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## wifi-mode.h (module 'wifi'): void ns3::WifiModeValue::Set(ns3::WifiMode const & value) [member function] cls.add_method('Set', 'void', [param('ns3::WifiMode const &', 'value')]) return def register_Ns3WifiRadioEnergyModel_methods(root_module, cls): ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModel::WifiRadioEnergyModel(ns3::WifiRadioEnergyModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::WifiRadioEnergyModel const &', 'arg0')]) ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModel::WifiRadioEnergyModel() [constructor] cls.add_constructor([]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::ChangeState(int newState) [member function] cls.add_method('ChangeState', 'void', [param('int', 'newState')], is_virtual=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetCcaBusyCurrentA() const [member function] cls.add_method('GetCcaBusyCurrentA', 'double', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiPhy::State ns3::WifiRadioEnergyModel::GetCurrentState() const [member function] cls.add_method('GetCurrentState', 'ns3::WifiPhy::State', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetIdleCurrentA() const [member function] cls.add_method('GetIdleCurrentA', 'double', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): ns3::WifiRadioEnergyModelPhyListener * ns3::WifiRadioEnergyModel::GetPhyListener() [member function] cls.add_method('GetPhyListener', 'ns3::WifiRadioEnergyModelPhyListener *', []) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetRxCurrentA() const [member function] cls.add_method('GetRxCurrentA', 'double', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetSwitchingCurrentA() const [member function] cls.add_method('GetSwitchingCurrentA', 'double', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetTotalEnergyConsumption() const [member function] cls.add_method('GetTotalEnergyConsumption', 'double', [], is_const=True, is_virtual=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::GetTxCurrentA() const [member function] cls.add_method('GetTxCurrentA', 'double', [], is_const=True) ## wifi-radio-energy-model.h (module 'energy'): static ns3::TypeId ns3::WifiRadioEnergyModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::HandleEnergyDepletion() [member function] cls.add_method('HandleEnergyDepletion', 'void', [], is_virtual=True) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetCcaBusyCurrentA(double ccaBusyCurrentA) [member function] cls.add_method('SetCcaBusyCurrentA', 'void', [param('double', 'ccaBusyCurrentA')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetEnergyDepletionCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('SetEnergyDepletionCallback', 'void', [param('ns3::Callback< void, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function] cls.add_method('SetEnergySource', 'void', [param('ns3::Ptr< ns3::EnergySource >', 'source')], is_virtual=True) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetIdleCurrentA(double idleCurrentA) [member function] cls.add_method('SetIdleCurrentA', 'void', [param('double', 'idleCurrentA')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetRxCurrentA(double rxCurrentA) [member function] cls.add_method('SetRxCurrentA', 'void', [param('double', 'rxCurrentA')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetSwitchingCurrentA(double switchingCurrentA) [member function] cls.add_method('SetSwitchingCurrentA', 'void', [param('double', 'switchingCurrentA')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::SetTxCurrentA(double txCurrentA) [member function] cls.add_method('SetTxCurrentA', 'void', [param('double', 'txCurrentA')]) ## wifi-radio-energy-model.h (module 'energy'): void ns3::WifiRadioEnergyModel::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## wifi-radio-energy-model.h (module 'energy'): double ns3::WifiRadioEnergyModel::DoGetCurrentA() const [member function] cls.add_method('DoGetCurrentA', 'double', [], is_const=True, visibility='private', is_virtual=True) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void 
ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3BasicEnergySource_methods(root_module, cls): ## basic-energy-source.h (module 'energy'): ns3::BasicEnergySource::BasicEnergySource(ns3::BasicEnergySource const & arg0) [copy constructor] cls.add_constructor([param('ns3::BasicEnergySource const &', 'arg0')]) ## basic-energy-source.h (module 'energy'): ns3::BasicEnergySource::BasicEnergySource() [constructor] cls.add_constructor([]) ## basic-energy-source.h (module 'energy'): double ns3::BasicEnergySource::GetEnergyFraction() [member function] cls.add_method('GetEnergyFraction', 'double', [], is_virtual=True) ## basic-energy-source.h (module 'energy'): ns3::Time ns3::BasicEnergySource::GetEnergyUpdateInterval() const [member function] cls.add_method('GetEnergyUpdateInterval', 'ns3::Time', [], is_const=True) ## basic-energy-source.h (module 'energy'): double ns3::BasicEnergySource::GetInitialEnergy() const [member function] cls.add_method('GetInitialEnergy', 'double', [], is_const=True, is_virtual=True) ## basic-energy-source.h (module 'energy'): double ns3::BasicEnergySource::GetRemainingEnergy() [member function] cls.add_method('GetRemainingEnergy', 'double', [], is_virtual=True) ## basic-energy-source.h (module 'energy'): double ns3::BasicEnergySource::GetSupplyVoltage() const [member function] cls.add_method('GetSupplyVoltage', 'double', [], is_const=True, is_virtual=True) ## basic-energy-source.h (module 'energy'): static ns3::TypeId ns3::BasicEnergySource::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::SetEnergyUpdateInterval(ns3::Time interval) [member function] cls.add_method('SetEnergyUpdateInterval', 'void', [param('ns3::Time', 'interval')]) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::SetInitialEnergy(double initialEnergyJ) [member function] cls.add_method('SetInitialEnergy', 'void', [param('double', 'initialEnergyJ')]) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::SetSupplyVoltage(double supplyVoltageV) [member function] cls.add_method('SetSupplyVoltage', 'void', [param('double', 'supplyVoltageV')]) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::UpdateEnergySource() [member function] cls.add_method('UpdateEnergySource', 'void', [], is_virtual=True) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## basic-energy-source.h (module 'energy'): void ns3::BasicEnergySource::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, 
is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def 
register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_internal(module.get_submodule('internal'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_internal(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
ericjpj/ns-3-dev
src/energy/bindings/modulegen__gcc_LP64.py
Python
gpl-2.0
360,229
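The generated script above is meant to be driven by pybindgen. A minimal sketch of that flow, assuming pybindgen is installed; the module name, include, and output path below are illustrative placeholders, not values from the ns-3 build:

import pybindgen
from pybindgen import FileCodeSink

def sketch_generate_bindings():
    # Describe a module and emit C++ binding code to a file, mirroring the
    # way the generated main() feeds a FileCodeSink wrapping sys.stdout.
    mod = pybindgen.Module('energy')                # hypothetical module name
    mod.add_include('"ns3/nix-vector.h"')           # illustrative include
    with open('energy_bindings.cc', 'w') as out:    # hypothetical output path
        mod.generate(FileCodeSink(out))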
#!/usr/bin/env python
__author__ = 'Mike McCann'
__version__ = '$Revision: $'.split()[1]
__date__ = '$Date: $'.split()[1]
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'

__doc__ = '''
convertToMatlab.py simply copies a set of .csv files to a parallel set,
renaming the headers so that a simple Matlab script can read the data into
a structure that makes converting to NetCDF easy.

File header information:
------------------------
% Device,10G100648
% File name,10G100648_20111103_112724
% Cast time (UTC),2011-11-03 11:27:24
% Cast time (local),2011-11-03 11:27:24
% Sample type,Cast
% Cast data,Processed
% Location source,GPS
% Default latitude,32
% Default altitude,0
% Start latitude,37.7246293
% Start longitude,-0.7640086
% Start altitude,42.349998474121094
% Start GPS horizontal error(Meter),2,25
% Start GPS vertical error(Meter),4,84000015258789
% Start GPS number of satellites,7
% End latitude,37.7247336
% End longitude,-0.7635351
% End altitude,74.389999389648438
% End GPS horizontal error(Meter),3,17000007629395
% End GPS vertical error(Meter),7,03999996185303
% End GPS number of satellites,7
% Cast duration (Seconds),152
% Samples per second,5
% Electronics calibration date,0001-01-01
% Conductivity calibration date,2010-07-28
% Temperature calibration date,2010-07-28
% Pressure calibration date,2010-07-28
% Pressure (Decibar),Depth (Meter),Temperature (Celsius),Conductivity (MicroSiemens per Centimeter),Specific conductance (MicroSiemens per Centimeter),Salinity (Practical Salinity Scale),Sound velocity (Meters per Second),Density (Kilograms per Cubic Meter)

@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''

import os
import glob
import shutil


def fixHeader(line):
    '''For YSI CastAway data files, replace the header with one that can be
    read by Matlab's mfcsvread.m.
    '''
    newName = {
        'Pressure (Decibar)': 'pressure',
        'Depth (Meter)': 'depth',
        'Temperature (Celsius)': 'temperature',
        'Conductivity (MicroSiemens per Centimeter)': 'conductivity',
        'Specific conductance (MicroSiemens per Centimeter)': 'specificconductivity',
        'Salinity (Practical Salinity Scale)': 'salinity',
        'Sound velocity (Meters per Second)': 'soundvelocity',
        'Density (Kilograms per Cubic Meter)': 'density',
    }
    newLine = ''
    for name in line.strip().split(','):
        newLine += newName[name] + ','

    return newLine[:-1]


if __name__ == '__main__':
    fixedHeaderDir = 'fixedHeaders'
    if not os.path.exists(fixedHeaderDir):
        os.mkdir(fixedHeaderDir)

    for file in glob.glob('*.csv'):
        print(file)
        outFile = open('%s/%s' % (fixedHeaderDir, file), 'w')
        i = 0
        for line in open(file, 'r'):
            if i == 0:
                fixedLine = fixHeader(line)
                outFile.write(fixedLine + '\r\n')
            else:
                outFile.write(line)
            i = i + 1
        outFile.close()
duane-edgington/stoqs
stoqs/loaders/MarMenor/toNetCDF/convertToMatlab.py
Python
gpl-3.0
2,914
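A quick usage sketch for the fixHeader() function above, feeding it the column header quoted in the script's docstring; the expected output reflects the corrected 'soundvelocity' key:

header = ('Pressure (Decibar),Depth (Meter),Temperature (Celsius),'
          'Conductivity (MicroSiemens per Centimeter),'
          'Specific conductance (MicroSiemens per Centimeter),'
          'Salinity (Practical Salinity Scale),'
          'Sound velocity (Meters per Second),'
          'Density (Kilograms per Cubic Meter)')

print(fixHeader(header))
# -> pressure,depth,temperature,conductivity,specificconductivity,
#    salinity,soundvelocity,density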
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import hashlib
import zipfile
import sys

from testtools.matchers import Equals

from snapcraft.internal.sources import errors
from snapcraft.internal.sources._checksum import verify_checksum
from tests import unit

if sys.version_info < (3, 6):
    import sha3  # noqa


class TestChecksum(unit.TestCase):
    def setUp(self):
        super().setUp()

    def test_invalid_checksum(self):
        # Create a file for testing
        os.makedirs(os.path.join("src"))
        dummy_file = os.path.join("src", "test")
        open(dummy_file, "w").close()

        self.assertRaises(AttributeError, verify_checksum, "456/abcde", dummy_file)

    def test_correct_checksum(self):
        # Create zip file for testing
        os.makedirs(os.path.join("src"))
        file_to_zip = os.path.join("src", "test.txt")
        open(file_to_zip, "w").close()
        zip_file = zipfile.ZipFile(os.path.join("src", "test.zip"), "w")
        zip_file.write(file_to_zip)
        zip_file.close()

        calculated_checksum = hashlib.new(
            "md5", open(os.path.join("src", "test.zip"), "rb").read()
        )
        calculated_checksum = calculated_checksum.hexdigest()

        verify_checksum("md5/" + calculated_checksum, "src/test.zip")

    def test_incorrect_checksum(self):
        # Create zip file for testing
        os.makedirs(os.path.join("src"))
        file_to_zip = os.path.join("src", "test.txt")
        open(file_to_zip, "w").close()
        zip_file = zipfile.ZipFile(os.path.join("src", "test.zip"), "w")
        zip_file.write(file_to_zip)
        zip_file.close()

        incorrect_checksum = "fe049cfba688aa1af88bc78191d7f904"
        calculated_checksum = hashlib.new(
            "md5", open(os.path.join("src", "test.zip"), "rb").read()
        )
        calculated_checksum = calculated_checksum.hexdigest()

        raised = self.assertRaises(
            errors.DigestDoesNotMatchError,
            verify_checksum,
            "md5/" + incorrect_checksum,
            "src/test.zip",
        )

        self.assertThat(raised.expected, Equals(incorrect_checksum))
        self.assertThat(raised.calculated, Equals(calculated_checksum))
ubuntu-core/snapcraft
tests/unit/sources/test_checksum.py
Python
gpl-3.0
2,851
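The tests above exercise verify_checksum() with "<algorithm>/<hexdigest>" strings. A standalone sketch of that convention follows; it is illustrative only, not snapcraft's actual implementation:

import hashlib

def sketch_verify(checksum, filepath):
    # Split "md5/abc..." into algorithm and expected digest, then compare
    # against the digest computed over the file's bytes. hashlib.new()
    # raises for unknown algorithm names.
    algorithm, _, expected = checksum.partition('/')
    with open(filepath, 'rb') as f:
        calculated = hashlib.new(algorithm, f.read()).hexdigest()
    if calculated != expected:
        raise ValueError('%s does not match expected %s' % (calculated, expected))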
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import xmlrpclib
import socket
import os
import time
import base64

from osv import fields, osv
import tools
import netsvc
from tools.translate import _

logger = netsvc.Logger()


def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error as e:
        raise e
    return res


addons_path = tools.config['addons_path'] + '/auto_backup/DBbackups'


class db_backup(osv.osv):
    _name = 'db.backup'

    def get_db_list(self, cr, user, ids, host='localhost', port='8069', context={}):
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list

    _columns = {
        'host': fields.char('Host', size=100, required=True),
        'port': fields.char('Port', size=10, required=True),
        'name': fields.char('Database', size=100, required=True,
                            help='Database you want to schedule backups for'),
        'bkp_dir': fields.char('Backup Directory', size=100, required=True,
                               help='Absolute path for storing the backups'),
    }

    _defaults = {
        'bkp_dir': lambda *a: addons_path,
        'host': lambda *a: 'localhost',
        'port': lambda *a: '8069',
    }

    def _check_db_exist(self, cr, user, ids):
        for rec in self.browse(cr, user, ids):
            db_list = self.get_db_list(cr, user, ids, rec.host, rec.port)
            if rec.name in db_list:
                return True
        return False

    _constraints = [
        (_check_db_exist, _('Error! No such database exists.'), []),
    ]

    def schedule_backup(self, cr, user, context={}):
        conf_ids = self.search(cr, user, [])
        confs = self.browse(cr, user, conf_ids)
        for rec in confs:
            db_list = self.get_db_list(cr, user, [], rec.host, rec.port)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                bkp_file = '%s_%s.sql' % (rec.name, time.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                fp = open(file_path, 'wb')
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    bkp = execute(conn, 'dump', tools.config['admin_passwd'], rec.name)
                except:
                    logger.notifyChannel(
                        'backup', netsvc.LOG_INFO,
                        "Couldn't back up database %s. Bad database administrator "
                        "password for server running at http://%s:%s"
                        % (rec.name, rec.host, rec.port))
                    continue
                bkp = base64.decodestring(bkp)
                fp.write(bkp)
                fp.close()
            else:
                logger.notifyChannel(
                    'backup', netsvc.LOG_INFO,
                    "Database %s doesn't exist on http://%s:%s"
                    % (rec.name, rec.host, rec.port))

db_backup()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
jaggu303619/asylum
openerp/addons/auto_backup/backup_scheduler.py
Python
agpl-3.0
4,346
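For reference, a standalone Python 2 sketch of the XML-RPC dump call that schedule_backup() above relies on; the host, port, and password arguments are placeholders:

import base64
import xmlrpclib

def sketch_dump_db(dbname, admin_passwd, host='localhost', port='8069'):
    # Ask the server's db service for a dump; it comes back base64-encoded,
    # which is why the scheduler decodes it before writing to disk.
    conn = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/db' % (host, port))
    return base64.decodestring(conn.dump(admin_passwd, dbname))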
"""Anon job view Revision ID: e49788bea4a Revises: 5a725fd5ddd6 Create Date: 2015-02-07 01:46:03.956058 """ # revision identifiers, used by Alembic. revision = 'e49788bea4a' down_revision = '5a725fd5ddd6' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table('anon_job_view', sa.Column('jobpost_id', sa.Integer(), nullable=False), sa.Column('anon_user_id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['anon_user_id'], ['anon_user.id'], ), sa.ForeignKeyConstraint(['jobpost_id'], ['jobpost.id'], ), sa.PrimaryKeyConstraint('jobpost_id', 'anon_user_id') ) op.create_index(op.f('ix_anon_job_view_anon_user_id'), 'anon_job_view', ['anon_user_id'], unique=False) op.create_index(op.f('ix_anon_job_view_created_at'), 'anon_job_view', ['created_at'], unique=False) def downgrade(): op.drop_index(op.f('ix_anon_job_view_created_at'), table_name='anon_job_view') op.drop_index(op.f('ix_anon_job_view_anon_user_id'), table_name='anon_job_view') op.drop_table('anon_job_view')
nhannv/hasjob
alembic/versions/e49788bea4a_anon_job_view.py
Python
agpl-3.0
1,146
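For reference, a declarative SQLAlchemy model matching the table this migration creates; this is an illustrative sketch, not code from the hasjob tree:

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class AnonJobView(Base):
    # The composite primary key mirrors sa.PrimaryKeyConstraint above; the
    # index=True columns correspond to the op.create_index() calls.
    __tablename__ = 'anon_job_view'
    jobpost_id = sa.Column(sa.Integer, sa.ForeignKey('jobpost.id'),
                           primary_key=True)
    anon_user_id = sa.Column(sa.Integer, sa.ForeignKey('anon_user.id'),
                             primary_key=True, index=True)
    created_at = sa.Column(sa.DateTime, nullable=False, index=True)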
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
#
"""This plugin renders the results from Rekall."""

import json
import re

from django.utils import html as django_html

from rekall.ui import json_renderer

import logging

from grr.client.client_actions import grr_rekall
from grr.gui import renderers
from grr.gui.plugins import semantic
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import utils


class GRRRekallViewerRenderer(grr_rekall.GRRRekallRenderer):
  """Spawns a new renderer hierarchy for HTML rendering."""


class GRRRekallViewerObjectRenderer(grr_rekall.GRRObjectRenderer):
  """Spawns a new hierarchy of object renderers capable of HTML rendering."""

  renderers = ["GRRRekallViewerRenderer"]

  def RawHTML(self, item, **options):
    """Returns the object's escaped summary."""
    return django_html.escape(
        utils.SmartStr(
            self._GetDelegateObjectRenderer(item).Summary(item, **options)))


class GRREProcessObjectRenderer(GRRRekallViewerObjectRenderer):
  """Special rendering for _EPROCESS objects."""

  renders_type = "_EPROCESS"

  layout = renderers.Template("""
<div id="{{unique|escape}}" class="modal fade" role="dialog"
  aria-hidden="true">
  <div class="modal-dialog">
    <div class="modal-content">
      <div class="modal-header">
        <button type="button" class="close" aria-hidden="true"
          data-dismiss="modal">
          &times;
        </button>
        <h4 class="modal-title">Process {{this.Cybox.Name|escape}}</h4>
      </div>
      <div id="ClientInfoContent_{{unique|escape}}" class="modal-body">
        <table class="table table-hover">
          <tr><th>Key</th><th>Value</th></tr>
          {% for k, v in data %}
          <tr><td>{{k|escape}}</td><td>{{v|escape}}</td></tr>
          {% endfor %}
        </table>
      </div>
    </div>
  </div>
</div>

<a href=# data-toggle="modal" data-target="#{{unique|escape}}">
  {{this.Cybox.Name|escape}} ({{this.Cybox.PID|escape}})
</a>
""")

  def _Flatten(self, prefix, item):
    result = []
    for k, v in item.items():
      next_prefix = "%s.%s" % (prefix, k)
      if isinstance(v, dict):
        result.extend(self._Flatten(next_prefix, v))
      else:
        result.append((next_prefix, v))

    return result

  def RawHTML(self, item, **_):
    return self.layout.RawHTML(this=item, data=self._Flatten("", item))


class GRRPointerObjectRenderer(GRRRekallViewerObjectRenderer):
  """Special rendering for Pointer objects."""

  renders_type = "Pointer"

  def RawHTML(self, item, **_):
    """Renders the object the pointer points to."""
    return RenderRekallObject(item["target_obj"])


def RenderRekallObject(obj, **options):
  """Renders an encoded Rekall object with an appropriate renderer."""
  renderer = json_renderer.JsonObjectRenderer.FromEncoded(
      obj, "GRRRekallViewerRenderer")("GRRRekallViewerRenderer")
  return renderer.RawHTML(obj, **options)


def GetRekallObjectSummary(obj):
  """Returns a summary string for a given encoded Rekall object."""
  renderer = json_renderer.JsonObjectRenderer.FromEncoded(
      obj, "GRRRekallViewerRenderer")("GRRRekallViewerRenderer")
  return utils.SmartStr(renderer.Summary(obj))


class RekallTable(renderers.TemplateRenderer):
  """Renders a single Rekall table."""

  layout_template = renderers.Template("""
<table class="full-width">
  <thead>
    <tr>
      {% for column in this.column_specs %}
      <th class="proto_header">{{column.name|escape}}</th>
      {% endfor %}
    </tr>
  </thead>
  <tbody>
    {% for row in this.rows %}
    <tr>
      {% for value in row %}
      <td class="proto_value">
        {{value|safe}}
      </td>
      {% endfor %}
    </tr>
    {% endfor %}
  </tbody>
</table>
""")

  def __init__(self, column_specs):
    super(RekallTable, self).__init__()
    self.column_specs = column_specs
    self.rows = []

  def AddRow(self, data):
    row = []
    for column in self.column_specs:
      column_name = column.get("cname", column.get("name"))
      item = data.get(column_name)
      row.append(RenderRekallObject(item, **column))

    self.rows.append(row)


class PluginHeader(renderers.TemplateRenderer):
  """Renders metadata about plugin execution."""

  layout_template = renderers.Template("""
<h1>{{this.metadata.plugin_name|escape}}</h1>
""")

  def __init__(self, metadata, **kwargs):
    super(PluginHeader, self).__init__(**kwargs)
    self.metadata = metadata


class SectionHeader(renderers.TemplateRenderer):
  """Renders a section header."""

  layout_template = renderers.Template("""
<h3>{{this.header|escape}}</h3>
""")

  def __init__(self, header=None, name=None, width=50, keep_sort=False,
               **kwargs):
    super(SectionHeader, self).__init__(**kwargs)
    self.header = header or name or ""


class FreeFormatText(renderers.TemplateRenderer):
  """Renders free-formatted text."""

  layout_template = renderers.Template("""
<pre class="proto_value">{{this.data|escape}}</pre>
""")

  def __init__(self, data, **kwargs):
    super(FreeFormatText, self).__init__(**kwargs)
    self.data = data


class RekallErrorRenderer(renderers.TemplateRenderer):
  """Renders Rekall errors."""

  layout_template = renderers.Template("""
<pre class="proto_value proto_error">{{this.data|escape}}</pre>
""")

  def __init__(self, data, **kwargs):
    super(RekallErrorRenderer, self).__init__(**kwargs)
    self.data = data


class RekallResponseCollectionRenderer(semantic.RDFValueRenderer):
  """A renderer for the RekallResponseCollection."""

  layout_template = renderers.Template("""
{% for element in this.elements %}
{{element|safe}}
{% endfor %}
""")

  def __init__(self, *args, **kw):
    super(RekallResponseCollectionRenderer, self).__init__(*args, **kw)
    self.elements = []
    self.current_table = None
    self.free_text = []

  def _flush_table(self):
    if self.current_table:
      self.elements.append(self.current_table)
      self.current_table = None

  def _flush_freetext(self):
    if self.free_text:
      self.elements.append(FreeFormatText("".join(self.free_text)))
      self.free_text = []

  def Layout(self, request, response):
    if self.proxy:
      collection = self.proxy
    else:
      try:
        aff4_path = self.state.get("aff4_path") or request.REQ.get("aff4_path")
        collection = aff4.FACTORY.Open(
            aff4_path, aff4_type="RekallResponseCollection",
            token=request.token)
      except IOError:
        return

    output_directories = set()

    for rekall_response in collection:
      for statement in json.loads(rekall_response.json_messages):
        command = statement[0]

        # Metadata about the currently running plugin.
        if command == "m":
          # Flush any old tables.
          self._flush_table()
          self._flush_freetext()
          self.elements.append(PluginHeader(statement[1]))

        # Start a new section.
        elif command == "s":
          self._flush_table()
          self._flush_freetext()
          self.elements.append(SectionHeader(**statement[1]))

        # Free-format statement.
        elif command == "f":
          self._flush_table()

          format_string = statement[1]
          try:
            args = statement[2:]
          except IndexError:
            args = []

          def FormatCallback(match):
            arg_pos = int(match.group(1))
            # It's ok to reference args[arg_pos] as FormatCallback is only
            # used in the next re.sub() call and nowhere else.
            arg = args[arg_pos]  # pylint: disable=cell-var-from-loop
            return GetRekallObjectSummary(arg)

          rendered_free_text = re.sub(r"\{(\d+)(?:\:.+?\}|\})",
                                      FormatCallback, format_string)
          self.free_text.append(rendered_free_text)

        # Errors reported from Rekall.
        elif command == "e":
          self._flush_table()
          self._flush_freetext()
          self.elements.append(RekallErrorRenderer(statement[1]))

        # Start a table.
        elif command == "t":
          self._flush_table()
          self._flush_freetext()

          # Create a new table.
          self.current_table = RekallTable(statement[1])

        # Add a row to the current table.
        elif command == "r":
          self._flush_freetext()
          if not self.current_table:
            logging.warning("Rekall plugin %s tried to write a "
                            "table row but no table was defined.",
                            rekall_response.plugin)
            # This is pretty bad but at least we can show the data somehow.
            self.free_text.append(utils.SmartStr(statement[1]))
            continue

          self.current_table.AddRow(statement[1])

        # File that was output by Rekall and extracted.
        elif command == "file":
          # Currently, when we render a client URN the link leads the user to
          # the directory in the virtual file system, not the particular
          # file. So we just render one link for each output directory.
          file_urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
              rdfvalue.PathSpec(**statement[1]), rekall_response.client_urn)
          output_directories.add(rdfvalue.RDFURN(file_urn.Dirname()))

        elif command == "p":
          # The "p" command indicates progress; we don't render it.
          pass

    self._flush_table()
    self._flush_freetext()

    for directory in output_directories:
      self.elements.append(semantic.RDFURNRenderer(directory))

    return super(RekallResponseCollectionRenderer, self).Layout(
        request, response)
wandec/grr
gui/plugins/rekall_viewer.py
Python
apache-2.0
9,626
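A minimal, self-contained sketch of the Rekall JSON message stream that Layout() above dispatches on. The sample statements are invented for illustration; each one is a [command, payload, ...] list keyed by the single-letter codes the renderer handles:

messages = [
    ["m", {"plugin_name": "pslist"}],            # plugin metadata -> PluginHeader
    ["s", {"name": "Processes"}],                # new section -> SectionHeader
    ["t", [{"name": "Name"}, {"name": "PID"}]],  # table definition -> RekallTable
    ["r", {"Name": "init", "PID": 1}],           # table row -> AddRow()
    ["f", "scanned {0} objects", 42],            # free-format text with args
    ["e", "access denied"],                      # error -> RekallErrorRenderer
    ["p", "50%"],                                # progress, not rendered
]

for statement in messages:
    command, payload = statement[0], statement[1]
    print(command, payload)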
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Covertype Classifier trainer script."""
import os
import pickle
import subprocess
import sys

import fire
import hypertune
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

AIP_MODEL_DIR = os.environ["AIP_MODEL_DIR"]
MODEL_FILENAME = "model.pkl"


def train_evaluate(
    training_dataset_path, validation_dataset_path, alpha, max_iter, hptune
):
    """Trains the Covertype Classifier model."""
    df_train = pd.read_csv(training_dataset_path)
    df_validation = pd.read_csv(validation_dataset_path)

    if not hptune:
        df_train = pd.concat([df_train, df_validation])

    numeric_features = [
        "Elevation",
        "Aspect",
        "Slope",
        "Horizontal_Distance_To_Hydrology",
        "Vertical_Distance_To_Hydrology",
        "Horizontal_Distance_To_Roadways",
        "Hillshade_9am",
        "Hillshade_Noon",
        "Hillshade_3pm",
        "Horizontal_Distance_To_Fire_Points",
    ]

    categorical_features = ["Wilderness_Area", "Soil_Type"]

    preprocessor = ColumnTransformer(
        transformers=[
            ("num", StandardScaler(), numeric_features),
            ("cat", OneHotEncoder(), categorical_features),
        ]
    )

    pipeline = Pipeline(
        [
            ("preprocessor", preprocessor),
            ("classifier", SGDClassifier(loss="log")),
        ]
    )

    num_features_type_map = {feature: "float64" for feature in numeric_features}
    df_train = df_train.astype(num_features_type_map)
    df_validation = df_validation.astype(num_features_type_map)

    print(f"Starting training: alpha={alpha}, max_iter={max_iter}")
    # pylint: disable-next=invalid-name
    X_train = df_train.drop("Cover_Type", axis=1)
    y_train = df_train["Cover_Type"]

    pipeline.set_params(classifier__alpha=alpha, classifier__max_iter=max_iter)
    pipeline.fit(X_train, y_train)

    if hptune:
        # pylint: disable-next=invalid-name
        X_validation = df_validation.drop("Cover_Type", axis=1)
        y_validation = df_validation["Cover_Type"]
        accuracy = pipeline.score(X_validation, y_validation)
        print(f"Model accuracy: {accuracy}")
        # Log it with hypertune
        hpt = hypertune.HyperTune()
        hpt.report_hyperparameter_tuning_metric(
            hyperparameter_metric_tag="accuracy", metric_value=accuracy
        )

    # Save the model
    if not hptune:
        with open(MODEL_FILENAME, "wb") as model_file:
            pickle.dump(pipeline, model_file)
        subprocess.check_call(
            ["gsutil", "cp", MODEL_FILENAME, AIP_MODEL_DIR], stderr=sys.stdout
        )
        print(f"Saved model in: {AIP_MODEL_DIR}")


if __name__ == "__main__":
    fire.Fire(train_evaluate)
GoogleCloudPlatform/asl-ml-immersion
notebooks/kubeflow_pipelines/pipelines/solutions/trainer_image_vertex/train.py
Python
apache-2.0
3,462
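A hedged usage sketch for the trainer above, calling train_evaluate() directly rather than through fire's CLI. The dataset paths and hyperparameter values are placeholders, it assumes the script is saved as train.py, and in a real Vertex AI job AIP_MODEL_DIR is injected by the platform:

import os

os.environ.setdefault("AIP_MODEL_DIR", "gs://my-bucket/models/")  # placeholder

from train import train_evaluate  # assumes the script above is train.py

train_evaluate(
    training_dataset_path="data/training.csv",      # placeholder path
    validation_dataset_path="data/validation.csv",  # placeholder path
    alpha=0.001,
    max_iter=500,
    hptune=True,  # report accuracy to hypertune instead of saving the model
)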
#!/usr/bin/env python3
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import json
import logging
import os
import pathlib
import subprocess
import tempfile
from typing import Dict, List

MY_DIR = os.path.dirname(os.path.realpath(__file__))


def load_cell_roots(repo_dir: str) -> Dict[str, str]:
    """Returns a map with cell keys and their roots as values."""
    cell_config = (
        subprocess.check_output(["buck", "audit", "cell"], cwd=repo_dir)
        .decode()
        .strip()
    )
    logging.debug("Cell config: %r" % cell_config)
    cell_roots = {}  # type: Dict[str, str]
    if not cell_config:
        return cell_roots
    for config in cell_config.split(os.linesep):
        cell, path = map(lambda s: s.strip(), config.split(":"))
        cell_roots[cell] = path
    logging.debug("Loaded following cell roots: %r" % cell_roots)
    return cell_roots


def load_export_map(
    repo: str,
    cell_roots: Dict[str, str],
    build_file: str,
    cell_prefix: str = None,
    verbose: bool = False,
):
    """
    Returns a dictionary with import string keys and all symbols they export
    as values.
    """
    cell_root_args = []
    for cell, root in cell_roots.items():
        cell_root_args.extend(["--cell_root", cell + "=" + root])
    dump_script = os.path.join(MY_DIR, "dump.py")
    verbosity_flags = ["-v"] if verbose else []
    export_map_flags = ["export_map", "--use_load_function_import_string_format"]
    cell_prefix_flags = (
        ["--cell_prefix", cell_prefix] if cell_prefix is not None else []
    )
    dump_command = (
        [dump_script, "--json", "--repository", repo]
        + verbosity_flags
        + cell_root_args
        + export_map_flags
        + cell_prefix_flags
        + [build_file]
    )
    return json.loads(subprocess.check_output(dump_command).decode().strip())


class Buildozer:
    """Represents a buildozer tool."""

    def __init__(self, path: str, repo: str) -> None:
        self.path = path
        self.repo = repo

    def run(self, *commands: str) -> None:
        with tempfile.NamedTemporaryFile("w") as commands_file:
            for command in commands:
                commands_file.write(command)
                commands_file.write(os.linesep)
            commands_file.flush()
            try:
                subprocess.check_output(
                    [self.path, "-f", commands_file.name], cwd=self.repo
                )
            except subprocess.CalledProcessError as e:
                # return code 3 is returned when there are no changes, so
                # interpret it as success
                if e.returncode != 3:
                    raise


def add_load_funcs(
    buildozer: Buildozer, load_funcs: Dict[str, List[str]], package: str
) -> None:
    """Add load functions to package."""
    commands = []
    for import_string, symbols in load_funcs.items():
        commands.append(
            "new_load "
            + import_string
            + " "
            + " ".join(symbols)
            + "|"
            + package
            + ":__pkg__"
        )
    buildozer.run(*commands)


def remove_include_defs(buildozer: Buildozer, package: str) -> None:
    """Remove all include_defs functions from package."""
    buildozer.run("delete|" + package + ":%include_defs")


def fix_unused_loads(buildozer: Buildozer, package: str) -> None:
    """Remove all unused load symbols from package."""
    buildozer.run("fix unusedLoads|" + package + ":__pkg__")


def test_build_file(build_file: str, repo: str):
    """Verify that build file syntax is correct."""
    logging.debug("Testing %s...", build_file)
    subprocess.check_output(["buck", "audit", "rules", build_file], cwd=repo)


def main():
    parser = argparse.ArgumentParser(
        description="Migrates usages of include_defs function to load ."
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose diagnostic messages.",
    )
    parser.add_argument("build_file", metavar="FILE", help="Build file path.")
    parser.add_argument(
        "--repository", metavar="DIRECTORY", required=True, help="Repository path."
    )
    parser.add_argument(
        "--buildozer", metavar="FILE", required=True, help="Buildozer path."
    )
    parser.add_argument(
        "--cell_prefix",
        default="@",
        help="The prefix to use for cells in import strings.",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Whether new build file should be tested in the end.",
    )
    args = parser.parse_args()

    logging_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        level=logging_level,
        format=("%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s"),
    )

    cell_roots = load_cell_roots(args.repository)
    buildozer = Buildozer(args.buildozer, args.repository)
    package_dir = os.path.dirname(args.build_file)
    package = str(pathlib.Path(package_dir).relative_to(args.repository))
    load_funcs = load_export_map(
        repo=args.repository,
        cell_roots=cell_roots,
        build_file=args.build_file,
        cell_prefix=args.cell_prefix,
        verbose=args.verbose,
    )
    logging.debug(load_funcs)
    add_load_funcs(buildozer, load_funcs, package)
    remove_include_defs(buildozer, package)
    fix_unused_loads(buildozer, package)
    if args.test:
        test_build_file(args.build_file, args.repository)


if __name__ == "__main__":
    main()
rmaz/buck
scripts/migrations/migrate_include_defs_to_load.py
Python
apache-2.0
6,097
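A small self-contained sketch of the parsing that load_cell_roots() above performs, run against a made-up `buck audit cell` output (the cell names and paths are illustrative only):

cell_config = "foo: /repo/foo\nbar: /repo/bar"

cell_roots = {}
for line in cell_config.split("\n"):
    # maxsplit=1 keeps any further colons inside the path intact
    cell, path = (s.strip() for s in line.split(":", 1))
    cell_roots[cell] = path

print(cell_roots)  # {'foo': '/repo/foo', 'bar': '/repo/bar'}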
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""add dag_code table

Revision ID: 952da73b5eff
Revises: 852ae6c715af
Create Date: 2020-03-12 12:39:01.797462

"""
import sqlalchemy as sa
from alembic import op

from airflow.models.dagcode import DagCode

# revision identifiers, used by Alembic.
revision = '952da73b5eff'
down_revision = '852ae6c715af'
branch_labels = None
depends_on = None


def upgrade():
    """Create DagCode Table."""
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class SerializedDagModel(Base):
        __tablename__ = 'serialized_dag'

        # There are other columns here, but these are the only ones we need
        # for the SELECT/UPDATE we are doing
        dag_id = sa.Column(sa.String(250), primary_key=True)
        fileloc = sa.Column(sa.String(2000), nullable=False)
        fileloc_hash = sa.Column(sa.BigInteger, nullable=False)

    """Apply add source code table"""
    op.create_table(
        'dag_code',
        sa.Column('fileloc_hash', sa.BigInteger(), nullable=False,
                  primary_key=True, autoincrement=False),
        sa.Column('fileloc', sa.String(length=2000), nullable=False),
        sa.Column('source_code', sa.UnicodeText(), nullable=False),
        sa.Column('last_updated', sa.TIMESTAMP(timezone=True), nullable=False),
    )

    conn = op.get_bind()
    if conn.dialect.name != 'sqlite':
        if conn.dialect.name == "mssql":
            op.drop_index('idx_fileloc_hash', 'serialized_dag')

        op.alter_column(
            table_name='serialized_dag', column_name='fileloc_hash',
            type_=sa.BigInteger(), nullable=False
        )
        if conn.dialect.name == "mssql":
            op.create_index('idx_fileloc_hash', 'serialized_dag',
                            ['fileloc_hash'])

    sessionmaker = sa.orm.sessionmaker()
    session = sessionmaker(bind=conn)
    serialized_dags = session.query(SerializedDagModel).all()
    for dag in serialized_dags:
        dag.fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc)
        session.merge(dag)
    session.commit()


def downgrade():
    """Unapply add source code table"""
    op.drop_table('dag_code')
Acehaidrey/incubator-airflow
airflow/migrations/versions/952da73b5eff_add_dag_code_table.py
Python
apache-2.0
2,874
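The data migration above relies on DagCode.dag_fileloc_hash() producing values that fit the signed 64-bit BigInteger column it creates. A hedged sketch of one way such a hash can be derived; this illustrates the idea and is not necessarily Airflow's exact implementation:

import hashlib
import struct

def fileloc_hash(fileloc):
    # Take 8 bytes of a SHA-1 digest and shift right so the value stays
    # within a signed 64-bit BigInteger (illustrative only).
    digest = hashlib.sha1(fileloc.encode("utf-8")).digest()
    return struct.unpack(">Q", digest[-8:])[0] >> 8

print(fileloc_hash("/dags/example_dag.py"))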
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example populates a specific first party audience segment.

To determine which first party audience segments exist, run
get_first_party_audience_segments.py.
"""

# Import appropriate modules from the client library.
from googleads import dfp

AUDIENCE_SEGMENT_ID = 'INSERT_AUDIENCE_SEGMENT_ID_HERE'


def main(client, audience_segment_id):
  # Initialize appropriate service.
  audience_segment_service = client.GetService(
      'AudienceSegmentService', version='v201502')

  # Create statement object to get the specified first party audience segment.
  values = (
      [{'key': 'type',
        'value': {
            'xsi_type': 'TextValue',
            'value': 'FIRST_PARTY'
        }},
       {'key': 'audience_segment_id',
        'value': {
            'xsi_type': 'NumberValue',
            'value': audience_segment_id
        }}])
  query = 'WHERE Type = :type AND Id = :audience_segment_id'
  statement = dfp.FilterStatement(query, values, 1)

  response = audience_segment_service.getAudienceSegmentsByStatement(
      statement.ToStatement())

  if 'results' in response:
    segments = response['results']

    for segment in segments:
      print ('Audience segment with id \'%s\' and name \'%s\' will be '
             'populated.' % (segment['id'], segment['name']))

    action = {
        'xsi_type': 'PopulateAudienceSegments'
    }

    populated_audience_segments = (
        audience_segment_service.performAudienceSegmentAction(
            action, statement.ToStatement()))

    print ('%s audience segment populated' %
           populated_audience_segments['numChanges'])
  else:
    print 'No Results Found'


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, AUDIENCE_SEGMENT_ID)
richardfergie/googleads-python-lib
examples/dfp/v201502/audience_segment_service/populate_first_party_audience_segments.py
Python
apache-2.0
2,508
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import contextlib

import mock

from neutron.common import exceptions
from neutron.services.loadbalancer.drivers.haproxy import (
    namespace_driver
)
from neutron.tests import base


class TestHaproxyNSDriver(base.BaseTestCase):
    def setUp(self):
        super(TestHaproxyNSDriver, self).setUp()

        self.vif_driver = mock.Mock()
        self.vip_plug_callback = mock.Mock()

        self.driver = namespace_driver.HaproxyNSDriver(
            'sudo',
            '/the/path',
            self.vif_driver,
            self.vip_plug_callback
        )

        self.fake_config = {
            'pool': {'id': 'pool_id'},
            'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}}
        }

    def test_create(self):
        with mock.patch.object(self.driver, '_plug') as plug:
            with mock.patch.object(self.driver, '_spawn') as spawn:
                self.driver.create(self.fake_config)
                plug.assert_called_once_with(
                    'qlbaas-pool_id', {'id': 'port_id'}
                )
                spawn.assert_called_once_with(self.fake_config)

    def test_update(self):
        with contextlib.nested(
            mock.patch.object(self.driver, '_get_state_file_path'),
            mock.patch.object(self.driver, '_spawn'),
            mock.patch('__builtin__.open')
        ) as (gsp, spawn, mock_open):
            mock_open.return_value = ['5']

            self.driver.update(self.fake_config)

            mock_open.assert_called_once_with(gsp.return_value, 'r')
            spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])

    def test_spawn(self):
        with contextlib.nested(
            mock.patch.object(namespace_driver.hacfg, 'save_config'),
            mock.patch.object(self.driver, '_get_state_file_path'),
            mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ) as (mock_save, gsp, ip_wrap):
            gsp.side_effect = lambda x, y: y

            self.driver._spawn(self.fake_config)

            mock_save.assert_called_once_with('conf', self.fake_config, 'sock')
            cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
            ip_wrap.assert_has_calls([
                mock.call('sudo', 'qlbaas-pool_id'),
                mock.call().netns.execute(cmd)
            ])

    def test_destroy(self):
        with contextlib.nested(
            mock.patch.object(self.driver, '_get_state_file_path'),
            mock.patch.object(namespace_driver, 'kill_pids_in_file'),
            mock.patch.object(self.driver, '_unplug'),
            mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
            mock.patch('os.path.isdir'),
            mock.patch('shutil.rmtree')
        ) as (gsp, kill, unplug, ip_wrap, isdir, rmtree):
            gsp.side_effect = lambda x, y: '/pool/' + y

            self.driver.pool_to_port_id['pool_id'] = 'port_id'
            isdir.return_value = True

            self.driver.destroy('pool_id')

            kill.assert_called_once_with('sudo', '/pool/pid')
            unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
            isdir.called_once_with('/pool')
            rmtree.assert_called_once_with('/pool')
            ip_wrap.assert_has_calls([
                mock.call('sudo', 'qlbaas-pool_id'),
                mock.call().garbage_collect_namespace()
            ])

    def test_exists(self):
        with contextlib.nested(
            mock.patch.object(self.driver, '_get_state_file_path'),
            mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
            mock.patch('socket.socket'),
            mock.patch('os.path.exists'),
        ) as (gsp, ip_wrap, socket, path_exists):
            gsp.side_effect = lambda x, y: '/pool/' + y

            ip_wrap.return_value.netns.exists.return_value = True
            path_exists.return_value = True

            self.driver.exists('pool_id')

            ip_wrap.assert_has_calls([
                mock.call('sudo'),
                mock.call().netns.exists('qlbaas-pool_id')
            ])

            self.assertTrue(self.driver.exists('pool_id'))

    def test_get_stats(self):
        raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
                     'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
                     'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
                     'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
                     'check_status,check_code,check_duration,hrsp_1xx,'
                     'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
                     'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
                     '8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
                     '10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
                     ',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n'
                     'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
                     '32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
                     '224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
                     '1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
                     'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
                     'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,'
                     '0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,'
                     'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n')
        raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
                           'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
                           'status,weight,act,bck,chkfail,chkdown,lastchg,'
                           'downtime,qlimit,pid,iid,sid,throttle,lbtot,'
                           'tracked,type,rate,rate_lim,rate_max,check_status,'
                           'check_code,check_duration,hrsp_1xx,hrsp_2xx,'
                           'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
                           'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,'
                           '\n')
        with contextlib.nested(
                mock.patch.object(self.driver, '_get_state_file_path'),
                mock.patch('socket.socket'),
                mock.patch('os.path.exists'),
        ) as (gsp, socket, path_exists):
            gsp.side_effect = lambda x, y: '/pool/' + y
            path_exists.return_value = True
            socket.return_value = socket
            socket.recv.return_value = raw_stats

            exp_stats = {'connection_errors': '0',
                         'active_connections': '1',
                         'current_sessions': '3',
                         'bytes_in': '7764',
                         'max_connections': '2',
                         'max_sessions': '4',
                         'bytes_out': '2365',
                         'response_errors': '0',
                         'total_sessions': '10',
                         'members': {
                             '32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
                                 'status': 'ACTIVE',
                                 'health': 'L7OK',
                                 'failed_checks': '0'
                             },
                             'd9aea044-8867-4e80-9875-16fb808fa0f9': {
                                 'status': 'INACTIVE',
                                 'health': 'L4CON',
                                 'failed_checks': '9'
                             }
                         }}
            stats = self.driver.get_stats('pool_id')
            self.assertEqual(exp_stats, stats)

            socket.recv.return_value = raw_stats_empty
            self.assertEqual({'members': {}},
                             self.driver.get_stats('pool_id'))

            path_exists.return_value = False
            socket.reset_mock()
            self.assertEqual({}, self.driver.get_stats('pool_id'))
            self.assertFalse(socket.called)

    def test_plug(self):
        test_port = {'id': 'port_id',
                     'network_id': 'net_id',
                     'mac_address': 'mac_addr',
                     'fixed_ips': [{'ip_address': '10.0.0.2',
                                    'subnet': {'cidr': '10.0.0.0/24',
                                               'gateway_ip': '10.0.0.1'}}]}
        with contextlib.nested(
                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
                mock.patch('netaddr.IPNetwork'),
                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
        ) as (dev_exists, ip_net, ip_wrap):
            self.vif_driver.get_device_name.return_value = 'test_interface'
            dev_exists.return_value = False
            ip_net.return_value = ip_net
            ip_net.prefixlen = 24

            self.driver._plug('test_ns', test_port)
            self.vip_plug_callback.assert_called_once_with('plug', test_port)
            self.assertTrue(dev_exists.called)
            self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
                                                         'test_interface',
                                                         'mac_addr',
                                                         namespace='test_ns')
            self.vif_driver.init_l3.assert_called_once_with(
                'test_interface', ['10.0.0.2/24'], namespace='test_ns')
            cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
            ip_wrap.assert_has_calls([
                mock.call('sudo', namespace='test_ns'),
                mock.call().netns.execute(cmd, check_exit_code=False),
            ])

            dev_exists.return_value = True
            self.assertRaises(exceptions.PreexistingDeviceFailure,
                              self.driver._plug, 'test_ns', test_port, False)

    def test_plug_no_gw(self):
        test_port = {'id': 'port_id',
                     'network_id': 'net_id',
                     'mac_address': 'mac_addr',
                     'fixed_ips': [{'ip_address': '10.0.0.2',
                                    'subnet': {'cidr': '10.0.0.0/24'}}]}
        with contextlib.nested(
                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
                mock.patch('netaddr.IPNetwork'),
                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
        ) as (dev_exists, ip_net, ip_wrap):
            self.vif_driver.get_device_name.return_value = 'test_interface'
            dev_exists.return_value = False
            ip_net.return_value = ip_net
            ip_net.prefixlen = 24

            self.driver._plug('test_ns', test_port)
            self.vip_plug_callback.assert_called_once_with('plug', test_port)
            self.assertTrue(dev_exists.called)
            self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
                                                         'test_interface',
                                                         'mac_addr',
                                                         namespace='test_ns')
            self.vif_driver.init_l3.assert_called_once_with(
                'test_interface', ['10.0.0.2/24'], namespace='test_ns')
            self.assertFalse(ip_wrap.called)

            dev_exists.return_value = True
            self.assertRaises(exceptions.PreexistingDeviceFailure,
                              self.driver._plug, 'test_ns', test_port, False)

    def test_unplug(self):
        self.vif_driver.get_device_name.return_value = 'test_interface'

        self.driver._unplug('test_ns', 'port_id')

        self.vip_plug_callback.assert_called_once_with('unplug',
                                                       {'id': 'port_id'})
        self.vif_driver.unplug('test_interface', namespace='test_ns')

    def test_kill_pids_in_file(self):
        with contextlib.nested(
            mock.patch('os.path.exists'),
            mock.patch('__builtin__.open'),
            mock.patch('neutron.agent.linux.utils.execute')
        ) as (path_exists, mock_open, mock_execute):
            file_mock = mock.MagicMock()
            mock_open.return_value = file_mock
            file_mock.__enter__.return_value = file_mock
            file_mock.__iter__.return_value = iter(['123'])

            path_exists.return_value = False
            namespace_driver.kill_pids_in_file('sudo', 'test_path')
            path_exists.assert_called_once_with('test_path')
            self.assertFalse(mock_open.called)
            self.assertFalse(mock_execute.called)

            path_exists.return_value = True
            mock_execute.side_effect = RuntimeError
            namespace_driver.kill_pids_in_file('sudo', 'test_path')
            mock_execute.assert_called_once_with(
                ['kill', '-9', '123'], 'sudo')

    def test_get_state_file_path(self):
        with mock.patch('os.makedirs') as mkdir:
            path = self.driver._get_state_file_path('pool_id', 'conf')
            self.assertEqual('/the/path/pool_id/conf', path)
            mkdir.assert_called_once_with('/the/path/pool_id', 0o755)
citrix-openstack-build/neutron
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py
Python
apache-2.0
14,148
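contextlib.nested, used throughout the tests above, exists only on Python 2. A sketch of the equivalent pattern on Python 3 with contextlib.ExitStack; the patch targets here are generic stdlib names for the sake of a runnable standalone example, not the neutron ones:

import contextlib
from unittest import mock

with contextlib.ExitStack() as stack:
    # enter_context() replaces each positional argument to contextlib.nested
    path_exists = stack.enter_context(mock.patch("os.path.exists"))
    mock_open = stack.enter_context(mock.patch("builtins.open"))
    path_exists.return_value = False
    print(path_exists("/pool"))  # False, served by the mock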
"""The tests for the Yr sensor platform.""" from datetime import datetime from homeassistant.bootstrap import async_setup_component from homeassistant.const import DEGREE, SPEED_METERS_PER_SECOND, UNIT_PERCENTAGE import homeassistant.util.dt as dt_util from tests.async_mock import patch from tests.common import assert_setup_component, load_fixture NOW = datetime(2016, 6, 9, 1, tzinfo=dt_util.UTC) async def test_default_setup(hass, legacy_patchable_time, aioclient_mock): """Test the default setup.""" aioclient_mock.get( "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/1.9/", text=load_fixture("yr.no.xml"), ) config = {"platform": "yr", "elevation": 0} hass.allow_pool = True with patch( "homeassistant.components.yr.sensor.dt_util.utcnow", return_value=NOW ), assert_setup_component(1): await async_setup_component(hass, "sensor", {"sensor": config}) await hass.async_block_till_done() state = hass.states.get("sensor.yr_symbol") assert state.state == "3" assert state.attributes.get("unit_of_measurement") is None async def test_custom_setup(hass, legacy_patchable_time, aioclient_mock): """Test a custom setup.""" aioclient_mock.get( "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/1.9/", text=load_fixture("yr.no.xml"), ) config = { "platform": "yr", "elevation": 0, "monitored_conditions": [ "pressure", "windDirection", "humidity", "fog", "windSpeed", ], } hass.allow_pool = True with patch( "homeassistant.components.yr.sensor.dt_util.utcnow", return_value=NOW ), assert_setup_component(1): await async_setup_component(hass, "sensor", {"sensor": config}) await hass.async_block_till_done() state = hass.states.get("sensor.yr_pressure") assert state.attributes.get("unit_of_measurement") == "hPa" assert state.state == "1009.3" state = hass.states.get("sensor.yr_wind_direction") assert state.attributes.get("unit_of_measurement") == DEGREE assert state.state == "103.6" state = hass.states.get("sensor.yr_humidity") assert state.attributes.get("unit_of_measurement") == UNIT_PERCENTAGE assert state.state == "55.5" state = hass.states.get("sensor.yr_fog") assert state.attributes.get("unit_of_measurement") == UNIT_PERCENTAGE assert state.state == "0.0" state = hass.states.get("sensor.yr_wind_speed") assert state.attributes.get("unit_of_measurement") == SPEED_METERS_PER_SECOND assert state.state == "3.5" async def test_forecast_setup(hass, legacy_patchable_time, aioclient_mock): """Test a custom setup with 24h forecast.""" aioclient_mock.get( "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/1.9/", text=load_fixture("yr.no.xml"), ) config = { "platform": "yr", "elevation": 0, "forecast": 24, "monitored_conditions": [ "pressure", "windDirection", "humidity", "fog", "windSpeed", ], } hass.allow_pool = True with patch( "homeassistant.components.yr.sensor.dt_util.utcnow", return_value=NOW ), assert_setup_component(1): await async_setup_component(hass, "sensor", {"sensor": config}) await hass.async_block_till_done() state = hass.states.get("sensor.yr_pressure") assert state.attributes.get("unit_of_measurement") == "hPa" assert state.state == "1008.3" state = hass.states.get("sensor.yr_wind_direction") assert state.attributes.get("unit_of_measurement") == DEGREE assert state.state == "148.9" state = hass.states.get("sensor.yr_humidity") assert state.attributes.get("unit_of_measurement") == UNIT_PERCENTAGE assert state.state == "77.4" state = hass.states.get("sensor.yr_fog") assert state.attributes.get("unit_of_measurement") == UNIT_PERCENTAGE assert state.state == "0.0" 
state = hass.states.get("sensor.yr_wind_speed") assert state.attributes.get("unit_of_measurement") == SPEED_METERS_PER_SECOND assert state.state == "3.6"
nkgilley/home-assistant
tests/components/yr/test_sensor.py
Python
apache-2.0
4,271
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django import template
from django.conf import settings

register = template.Library()

import re
import urlparse


@register.filter(name='youtube_embed_url')
# Converts a YouTube watch URL into an embed URL
def youtube_embed_url(value):
    if value:
        try:
            url_data = urlparse.urlparse(value)
            query = urlparse.parse_qs(url_data.query)
            video = query["v"][0]
            if video:
                embed_url = ('//www.youtube.com/embed/%s'
                             '?rel=0&autohide=1&showinfo=0&html5=1') % video
                return embed_url
        except:
            pass
    return ''


@register.filter(name='youtube_embed_from_id')
# Builds an embed URL from a YouTube video id
def youtube_embed_from_id(value):
    return '//www.youtube.com/embed/%s?rel=0&autohide=1&showinfo=0&html5=1' % value


@register.filter(name='youtube_get_id')
# Extracts the video id from a YouTube watch URL
def youtube_get_id(value):
    if value:
        try:
            url_data = urlparse.urlparse(value)
            query = urlparse.parse_qs(url_data.query)
            video = query["v"][0]
            if video:
                return video
        except:
            pass
    return ''
gquirozbogner/contentbox-master
main/templatetags/youtube_embed.py
Python
apache-2.0
1,817
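The filters above use the Python 2 urlparse module; a sketch of the same video-id extraction on Python 3 with urllib.parse from the standard library:

from urllib.parse import parse_qs, urlparse

def youtube_get_id_py3(value):
    try:
        return parse_qs(urlparse(value).query)["v"][0]
    except (KeyError, IndexError, ValueError):
        return ''

print(youtube_get_id_py3('https://www.youtube.com/watch?v=abc123'))  # abc123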
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer


class TestMarionetteSpecific:

    def setup_method(self, method):
        firefox_capabilities = DesiredCapabilities.FIREFOX
        firefox_capabilities['marionette'] = True
        self.driver = None
        try:
            self.driver = webdriver.Firefox(
                desired_capabilities=firefox_capabilities)
        except Exception:
            pass
        self.CHROME = 'chrome'
        self.CONTENT = 'content'

    def test_we_can_switch_context_to_chrome(self):
        if self.driver is None:
            pytest.skip("GeckoDriver was not found.")
        self.driver.set_context(self.CHROME)
        self.driver.execute_script("var c = Components.classes; return 1;")

    def teardown_method(self, method):
        try:
            self.driver.set_context(self.CONTENT)
            self.driver.quit()
        except:
            pass  # Don't care since we may have killed the browser above


def teardown_module(module):
    try:
        TestMarionetteSpecific.driver.quit()
    except:
        pass  # Don't Care since we may have killed the browser above
p0deje/selenium
py/test/selenium/webdriver/firefox/mn_set_context_tests.py
Python
apache-2.0
2,055
"""AirTouch 4 component to control of AirTouch 4 Climate Devices.""" import logging from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( FAN_AUTO, FAN_DIFFUSE, FAN_FOCUS, FAN_HIGH, FAN_LOW, FAN_MEDIUM, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from homeassistant.core import callback from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import DOMAIN SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE AT_TO_HA_STATE = { "Heat": HVAC_MODE_HEAT, "Cool": HVAC_MODE_COOL, "AutoHeat": HVAC_MODE_AUTO, # airtouch reports either autoheat or autocool "AutoCool": HVAC_MODE_AUTO, "Auto": HVAC_MODE_AUTO, "Dry": HVAC_MODE_DRY, "Fan": HVAC_MODE_FAN_ONLY, } HA_STATE_TO_AT = { HVAC_MODE_HEAT: "Heat", HVAC_MODE_COOL: "Cool", HVAC_MODE_AUTO: "Auto", HVAC_MODE_DRY: "Dry", HVAC_MODE_FAN_ONLY: "Fan", HVAC_MODE_OFF: "Off", } AT_TO_HA_FAN_SPEED = { "Quiet": FAN_DIFFUSE, "Low": FAN_LOW, "Medium": FAN_MEDIUM, "High": FAN_HIGH, "Powerful": FAN_FOCUS, "Auto": FAN_AUTO, "Turbo": "turbo", } AT_GROUP_MODES = [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY] HA_FAN_SPEED_TO_AT = {value: key for key, value in AT_TO_HA_FAN_SPEED.items()} _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Airtouch 4.""" coordinator = hass.data[DOMAIN][config_entry.entry_id] info = coordinator.data entities = [ AirtouchGroup(coordinator, group["group_number"], info) for group in info["groups"] ] + [AirtouchAC(coordinator, ac["ac_number"], info) for ac in info["acs"]] _LOGGER.debug(" Found entities %s", entities) async_add_entities(entities) class AirtouchAC(CoordinatorEntity, ClimateEntity): """Representation of an AirTouch 4 ac.""" _attr_supported_features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE _attr_temperature_unit = TEMP_CELSIUS def __init__(self, coordinator, ac_number, info): """Initialize the climate device.""" super().__init__(coordinator) self._ac_number = ac_number self._airtouch = coordinator.airtouch self._info = info self._unit = self._airtouch.GetAcs()[self._ac_number] @callback def _handle_coordinator_update(self): self._unit = self._airtouch.GetAcs()[self._ac_number] return super()._handle_coordinator_update() @property def device_info(self): """Return device info for this device.""" return { "identifiers": {(DOMAIN, self.unique_id)}, "name": self.name, "manufacturer": "Airtouch", "model": "Airtouch 4", } @property def unique_id(self): """Return unique ID for this device.""" return f"ac_{self._ac_number}" @property def current_temperature(self): """Return the current temperature.""" return self._unit.Temperature @property def name(self): """Return the name of the climate device.""" return f"AC {self._ac_number}" @property def fan_mode(self): """Return fan mode of the AC this group belongs to.""" return AT_TO_HA_FAN_SPEED[self._airtouch.acs[self._ac_number].AcFanSpeed] @property def fan_modes(self): """Return the list of available fan modes.""" airtouch_fan_speeds = self._airtouch.GetSupportedFanSpeedsForAc(self._ac_number) return [AT_TO_HA_FAN_SPEED[speed] for speed in airtouch_fan_speeds] @property def hvac_mode(self): """Return hvac target hvac state.""" is_off = self._unit.PowerState == "Off" if is_off: return HVAC_MODE_OFF return AT_TO_HA_STATE[self._airtouch.acs[self._ac_number].AcMode] @property 
def hvac_modes(self): """Return the list of available operation modes.""" airtouch_modes = self._airtouch.GetSupportedCoolingModesForAc(self._ac_number) modes = [AT_TO_HA_STATE[mode] for mode in airtouch_modes] modes.append(HVAC_MODE_OFF) return modes async def async_set_hvac_mode(self, hvac_mode): """Set new operation mode.""" if hvac_mode not in HA_STATE_TO_AT: raise ValueError(f"Unsupported HVAC mode: {hvac_mode}") if hvac_mode == HVAC_MODE_OFF: return await self.async_turn_off() await self._airtouch.SetCoolingModeForAc( self._ac_number, HA_STATE_TO_AT[hvac_mode] ) # in case it isn't already, unless the HVAC mode was off, then the ac should be on await self.async_turn_on() self._unit = self._airtouch.GetAcs()[self._ac_number] _LOGGER.debug("Setting operation mode of %s to %s", self._ac_number, hvac_mode) self.async_write_ha_state() async def async_set_fan_mode(self, fan_mode): """Set new fan mode.""" if fan_mode not in self.fan_modes: raise ValueError(f"Unsupported fan mode: {fan_mode}") _LOGGER.debug("Setting fan mode of %s to %s", self._ac_number, fan_mode) await self._airtouch.SetFanSpeedForAc( self._ac_number, HA_FAN_SPEED_TO_AT[fan_mode] ) self._unit = self._airtouch.GetAcs()[self._ac_number] self.async_write_ha_state() async def async_turn_on(self): """Turn on.""" _LOGGER.debug("Turning %s on", self.unique_id) # in case ac is not on. Airtouch turns itself off if no groups are turned on # (even if groups turned back on) await self._airtouch.TurnAcOn(self._ac_number) async def async_turn_off(self): """Turn off.""" _LOGGER.debug("Turning %s off", self.unique_id) await self._airtouch.TurnAcOff(self._ac_number) self.async_write_ha_state() class AirtouchGroup(CoordinatorEntity, ClimateEntity): """Representation of an AirTouch 4 group.""" _attr_supported_features = SUPPORT_TARGET_TEMPERATURE _attr_temperature_unit = TEMP_CELSIUS _attr_hvac_modes = AT_GROUP_MODES def __init__(self, coordinator, group_number, info): """Initialize the climate device.""" super().__init__(coordinator) self._group_number = group_number self._airtouch = coordinator.airtouch self._info = info self._unit = self._airtouch.GetGroupByGroupNumber(self._group_number) @callback def _handle_coordinator_update(self): self._unit = self._airtouch.GetGroupByGroupNumber(self._group_number) return super()._handle_coordinator_update() @property def device_info(self): """Return device info for this device.""" return { "identifiers": {(DOMAIN, self.unique_id)}, "name": self.name, "manufacturer": "Airtouch", "model": "Airtouch 4", } @property def unique_id(self): """Return unique ID for this device.""" return self._group_number @property def min_temp(self): """Return Minimum Temperature for AC of this group.""" return self._airtouch.acs[self._unit.BelongsToAc].MinSetpoint @property def max_temp(self): """Return Max Temperature for AC of this group.""" return self._airtouch.acs[self._unit.BelongsToAc].MaxSetpoint @property def name(self): """Return the name of the climate device.""" return self._unit.GroupName @property def current_temperature(self): """Return the current temperature.""" return self._unit.Temperature @property def target_temperature(self): """Return the temperature we are trying to reach.""" return self._unit.TargetSetpoint @property def hvac_mode(self): """Return hvac target hvac state.""" # there are other power states that aren't 'on' but still count as on (eg. 
'Turbo') is_off = self._unit.PowerState == "Off" if is_off: return HVAC_MODE_OFF return HVAC_MODE_FAN_ONLY async def async_set_hvac_mode(self, hvac_mode): """Set new operation mode.""" if hvac_mode not in HA_STATE_TO_AT: raise ValueError(f"Unsupported HVAC mode: {hvac_mode}") if hvac_mode == HVAC_MODE_OFF: return await self.async_turn_off() if self.hvac_mode == HVAC_MODE_OFF: await self.async_turn_on() self._unit = self._airtouch.GetGroups()[self._group_number] _LOGGER.debug( "Setting operation mode of %s to %s", self._group_number, hvac_mode ) self.async_write_ha_state() @property def fan_mode(self): """Return fan mode of the AC this group belongs to.""" return AT_TO_HA_FAN_SPEED[self._airtouch.acs[self._unit.BelongsToAc].AcFanSpeed] @property def fan_modes(self): """Return the list of available fan modes.""" airtouch_fan_speeds = self._airtouch.GetSupportedFanSpeedsByGroup( self._group_number ) return [AT_TO_HA_FAN_SPEED[speed] for speed in airtouch_fan_speeds] async def async_set_temperature(self, **kwargs): """Set new target temperatures.""" temp = kwargs.get(ATTR_TEMPERATURE) _LOGGER.debug("Setting temp of %s to %s", self._group_number, str(temp)) self._unit = await self._airtouch.SetGroupToTemperature( self._group_number, int(temp) ) self.async_write_ha_state() async def async_set_fan_mode(self, fan_mode): """Set new fan mode.""" if fan_mode not in self.fan_modes: raise ValueError(f"Unsupported fan mode: {fan_mode}") _LOGGER.debug("Setting fan mode of %s to %s", self._group_number, fan_mode) self._unit = await self._airtouch.SetFanSpeedByGroup( self._group_number, HA_FAN_SPEED_TO_AT[fan_mode] ) self.async_write_ha_state() async def async_turn_on(self): """Turn on.""" _LOGGER.debug("Turning %s on", self.unique_id) await self._airtouch.TurnGroupOn(self._group_number) # in case ac is not on. Airtouch turns itself off if no groups are turned on # (even if groups turned back on) await self._airtouch.TurnAcOn( self._airtouch.GetGroupByGroupNumber(self._group_number).BelongsToAc ) # this might cause the ac object to be wrong, so force the shared data # store to update await self.coordinator.async_request_refresh() self.async_write_ha_state() async def async_turn_off(self): """Turn off.""" _LOGGER.debug("Turning %s off", self.unique_id) await self._airtouch.TurnGroupOff(self._group_number) # this will cause the ac object to be wrong # (ac turns off automatically if no groups are running) # so force the shared data store to update await self.coordinator.async_request_refresh() self.async_write_ha_state()
sander76/home-assistant
homeassistant/components/airtouch4/climate.py
Python
apache-2.0
11,301
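A note on why HA_STATE_TO_AT above is written out by hand while HA_FAN_SPEED_TO_AT is derived with a dict comprehension: AT_TO_HA_STATE is not injective (AutoHeat, AutoCool and Auto all map to HVAC_MODE_AUTO), so inverting it mechanically would keep only the last key seen. A quick illustration with made-up values:

at_to_ha = {"AutoHeat": "auto", "AutoCool": "auto", "Heat": "heat"}
inverted = {value: key for key, value in at_to_ha.items()}
print(inverted)  # {'auto': 'AutoCool', 'heat': 'Heat'} -- 'AutoHeat' is silently lost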
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pecan
from pecan import Response
from pecan.rest import RestController

from st2common import log as logging
from st2common.models.api.base import jsexpose
from st2common.util.jsonify import json_encode
from st2api.listener import get_listener

LOG = logging.getLogger(__name__)


def format(gen):
    # Yield initial state so client would receive the headers the moment it
    # connects to the stream
    yield '\n'

    message = '''event: %s\ndata: %s\n\n'''

    for pack in gen:
        if not pack:
            yield '\n'
        else:
            (event, body) = pack
            yield message % (event, json_encode(body, indent=None))


class StreamController(RestController):
    @jsexpose(content_type='text/event-stream')
    def get_all(self):
        def make_response():
            res = Response(content_type='text/event-stream',
                           app_iter=format(get_listener().generator()))
            return res

        # Prohibit buffering response by eventlet
        pecan.request.environ['eventlet.minimum_write_chunk_size'] = 0

        stream = make_response()

        return stream
grengojbo/st2
st2api/st2api/controllers/v1/stream.py
Python
apache-2.0
1,903
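A sketch of the Server-Sent Events wire format that format() above emits; the event name and body are invented for illustration. Each frame is "event: <name>\ndata: <json>\n\n", and the bare '\n' yields double as keep-alives:

import json

def sse_frame(event, body):
    # Mirrors the message template used by format() above
    return 'event: %s\ndata: %s\n\n' % (event, json.dumps(body))

print(sse_frame('st2.liveaction__create', {'status': 'requested'}))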
# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import fractions import itertools from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import units import six import nova.conf from nova import context from nova import exception from nova.i18n import _ from nova import objects from nova.objects import fields from nova.objects import instance as obj_instance CONF = nova.conf.CONF LOG = logging.getLogger(__name__) MEMPAGES_SMALL = -1 MEMPAGES_LARGE = -2 MEMPAGES_ANY = -3 def get_vcpu_pin_set(): """Parsing vcpu_pin_set config. Returns a set of pcpu ids can be used by instances. """ if not CONF.vcpu_pin_set: return None cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set) if not cpuset_ids: raise exception.Invalid(_("No CPUs available after parsing %r") % CONF.vcpu_pin_set) return cpuset_ids def parse_cpu_spec(spec): """Parse a CPU set specification. :param spec: cpu set string eg "1-4,^3,6" Each element in the list is either a single CPU number, a range of CPU numbers, or a caret followed by a CPU number to be excluded from a previous range. :returns: a set of CPU indexes """ cpuset_ids = set() cpuset_reject_ids = set() for rule in spec.split(','): rule = rule.strip() # Handle multi ',' if len(rule) < 1: continue # Note the count limit in the .split() call range_parts = rule.split('-', 1) if len(range_parts) > 1: reject = False if range_parts[0] and range_parts[0][0] == '^': reject = True range_parts[0] = str(range_parts[0][1:]) # So, this was a range; start by converting the parts to ints try: start, end = [int(p.strip()) for p in range_parts] except ValueError: raise exception.Invalid(_("Invalid range expression %r") % rule) # Make sure it's a valid range if start > end: raise exception.Invalid(_("Invalid range expression %r") % rule) # Add available CPU ids to set if not reject: cpuset_ids |= set(range(start, end + 1)) else: cpuset_reject_ids |= set(range(start, end + 1)) elif rule[0] == '^': # Not a range, the rule is an exclusion rule; convert to int try: cpuset_reject_ids.add(int(rule[1:].strip())) except ValueError: raise exception.Invalid(_("Invalid exclusion " "expression %r") % rule) else: # OK, a single CPU to include; convert to int try: cpuset_ids.add(int(rule)) except ValueError: raise exception.Invalid(_("Invalid inclusion " "expression %r") % rule) # Use sets to handle the exclusion rules for us cpuset_ids -= cpuset_reject_ids return cpuset_ids def format_cpu_spec(cpuset, allow_ranges=True): """Format a libvirt CPU range specification. :param cpuset: set (or list) of CPU indexes Format a set/list of CPU indexes as a libvirt CPU range specification. It allow_ranges is true, it will try to detect continuous ranges of CPUs, otherwise it will just list each CPU index explicitly. 
:returns: a formatted CPU range string """ # We attempt to detect ranges, but don't bother with # trying to do range negations to minimize the overall # spec string length if allow_ranges: ranges = [] previndex = None for cpuindex in sorted(cpuset): if previndex is None or previndex != (cpuindex - 1): ranges.append([]) ranges[-1].append(cpuindex) previndex = cpuindex parts = [] for entry in ranges: if len(entry) == 1: parts.append(str(entry[0])) else: parts.append("%d-%d" % (entry[0], entry[len(entry) - 1])) return ",".join(parts) else: return ",".join(str(id) for id in sorted(cpuset)) def get_number_of_serial_ports(flavor, image_meta): """Get the number of serial consoles from the flavor or image :param flavor: Flavor object to read extra specs from :param image_meta: nova.objects.ImageMeta object instance If flavor extra specs is not set, then any image meta value is permitted. If flavor extra specs *is* set, then this provides the default serial port count. The image meta is permitted to override the extra specs, but *only* with a lower value. ie - flavor hw:serial_port_count=4 VM gets 4 serial ports - flavor hw:serial_port_count=4 and image hw_serial_port_count=2 VM gets 2 serial ports - image hw_serial_port_count=6 VM gets 6 serial ports - flavor hw:serial_port_count=4 and image hw_serial_port_count=6 Abort guest boot - forbidden to exceed flavor value :returns: number of serial ports """ def get_number(obj, property): num_ports = obj.get(property) if num_ports is not None: try: num_ports = int(num_ports) except ValueError: raise exception.ImageSerialPortNumberInvalid( num_ports=num_ports, property=property) return num_ports flavor_num_ports = get_number(flavor.extra_specs, "hw:serial_port_count") image_num_ports = image_meta.properties.get("hw_serial_port_count", None) if (flavor_num_ports and image_num_ports) is not None: if image_num_ports > flavor_num_ports: raise exception.ImageSerialPortNumberExceedFlavorValue() return image_num_ports return flavor_num_ports or image_num_ports or 1 class InstanceInfo(object): def __init__(self, state=None, max_mem_kb=0, mem_kb=0, num_cpu=0, cpu_time_ns=0, id=None): """Create a new Instance Info object :param state: the running state, one of the power_state codes :param max_mem_kb: (int) the maximum memory in KBytes allowed :param mem_kb: (int) the memory in KBytes used by the instance :param num_cpu: (int) the number of virtual CPUs for the instance :param cpu_time_ns: (int) the CPU time used in nanoseconds :param id: a unique ID for the instance """ self.state = state self.max_mem_kb = max_mem_kb self.mem_kb = mem_kb self.num_cpu = num_cpu self.cpu_time_ns = cpu_time_ns self.id = id def __eq__(self, other): return (self.__class__ == other.__class__ and self.__dict__ == other.__dict__) def _score_cpu_topology(topology, wanttopology): """Calculate score for the topology against a desired configuration :param wanttopology: nova.objects.VirtCPUTopology instance for preferred topology Calculate a score indicating how well this topology matches against a preferred topology. A score of 3 indicates an exact match for sockets, cores and threads. A score of 2 indicates a match of sockets & cores or sockets & threads or cores and threads. A score of 1 indicates a match of sockets or cores or threads. 
A score of 0 indicates no match :returns: score in range 0 (worst) to 3 (best) """ score = 0 if (wanttopology.sockets != -1 and topology.sockets == wanttopology.sockets): score = score + 1 if (wanttopology.cores != -1 and topology.cores == wanttopology.cores): score = score + 1 if (wanttopology.threads != -1 and topology.threads == wanttopology.threads): score = score + 1 return score def _get_cpu_topology_constraints(flavor, image_meta): """Get the topology constraints declared in flavor or image :param flavor: Flavor object to read extra specs from :param image_meta: nova.objects.ImageMeta object instance Gets the topology constraints from the configuration defined in the flavor extra specs or the image metadata. In the flavor this will look for hw:cpu_sockets - preferred socket count hw:cpu_cores - preferred core count hw:cpu_threads - preferred thread count hw:cpu_max_sockets - maximum socket count hw:cpu_max_cores - maximum core count hw:cpu_max_threads - maximum thread count In the image metadata this will look at hw_cpu_sockets - preferred socket count hw_cpu_cores - preferred core count hw_cpu_threads - preferred thread count hw_cpu_max_sockets - maximum socket count hw_cpu_max_cores - maximum core count hw_cpu_max_threads - maximum thread count The image metadata must be strictly lower than any values set in the flavor. All values are, however, optional. This will return a pair of nova.objects.VirtCPUTopology instances, the first giving the preferred socket/core/thread counts, and the second giving the upper limits on socket/core/ thread counts. exception.ImageVCPULimitsRangeExceeded will be raised if the maximum counts set against the image exceed the maximum counts set against the flavor exception.ImageVCPUTopologyRangeExceeded will be raised if the preferred counts set against the image exceed the maximum counts set against the image or flavor :returns: (preferred topology, maximum topology) """ # Obtain the absolute limits from the flavor flvmaxsockets = int(flavor.extra_specs.get( "hw:cpu_max_sockets", 65536)) flvmaxcores = int(flavor.extra_specs.get( "hw:cpu_max_cores", 65536)) flvmaxthreads = int(flavor.extra_specs.get( "hw:cpu_max_threads", 65536)) LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d", {"sockets": flvmaxsockets, "cores": flvmaxcores, "threads": flvmaxthreads}) # Get any customized limits from the image props = image_meta.properties maxsockets = props.get("hw_cpu_max_sockets", flvmaxsockets) maxcores = props.get("hw_cpu_max_cores", flvmaxcores) maxthreads = props.get("hw_cpu_max_threads", flvmaxthreads) LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d", {"sockets": maxsockets, "cores": maxcores, "threads": maxthreads}) # Image limits are not permitted to exceed the flavor # limits. 
ie they can only lower what the flavor defines if ((maxsockets > flvmaxsockets) or (maxcores > flvmaxcores) or (maxthreads > flvmaxthreads)): raise exception.ImageVCPULimitsRangeExceeded( sockets=maxsockets, cores=maxcores, threads=maxthreads, maxsockets=flvmaxsockets, maxcores=flvmaxcores, maxthreads=flvmaxthreads) # Get any default preferred topology from the flavor flvsockets = int(flavor.extra_specs.get("hw:cpu_sockets", -1)) flvcores = int(flavor.extra_specs.get("hw:cpu_cores", -1)) flvthreads = int(flavor.extra_specs.get("hw:cpu_threads", -1)) LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d", {"sockets": flvsockets, "cores": flvcores, "threads": flvthreads}) # If the image limits have reduced the flavor limits # we might need to discard the preferred topology # from the flavor if ((flvsockets > maxsockets) or (flvcores > maxcores) or (flvthreads > maxthreads)): flvsockets = flvcores = flvthreads = -1 # Finally see if the image has provided a preferred # topology to use sockets = props.get("hw_cpu_sockets", -1) cores = props.get("hw_cpu_cores", -1) threads = props.get("hw_cpu_threads", -1) LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d", {"sockets": sockets, "cores": cores, "threads": threads}) # Image topology is not permitted to exceed image/flavor # limits if ((sockets > maxsockets) or (cores > maxcores) or (threads > maxthreads)): raise exception.ImageVCPUTopologyRangeExceeded( sockets=sockets, cores=cores, threads=threads, maxsockets=maxsockets, maxcores=maxcores, maxthreads=maxthreads) # If no preferred topology was set against the image # then use the preferred topology from the flavor # We use 'and' not 'or', since if any value is set # against the image this invalidates the entire set # of values from the flavor if sockets == -1 and cores == -1 and threads == -1: sockets = flvsockets cores = flvcores threads = flvthreads LOG.debug("Chosen %(sockets)d:%(cores)d:%(threads)d limits " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d", {"sockets": sockets, "cores": cores, "threads": threads, "maxsockets": maxsockets, "maxcores": maxcores, "maxthreads": maxthreads}) return (objects.VirtCPUTopology(sockets=sockets, cores=cores, threads=threads), objects.VirtCPUTopology(sockets=maxsockets, cores=maxcores, threads=maxthreads)) def _get_possible_cpu_topologies(vcpus, maxtopology, allow_threads): """Get a list of possible topologies for a vCPU count :param vcpus: total number of CPUs for guest instance :param maxtopology: nova.objects.VirtCPUTopology for upper limits :param allow_threads: if the hypervisor supports CPU threads Given a total desired vCPU count and constraints on the maximum number of sockets, cores and threads, return a list of nova.objects.VirtCPUTopology instances that represent every possible topology that satisfies the constraints. exception.ImageVCPULimitsRangeImpossible is raised if it is impossible to achieve the total vcpu count given the maximum limits on sockets, cores & threads. 
:returns: list of nova.objects.VirtCPUTopology instances """ # Clamp limits to number of vcpus to prevent # iterating over insanely large list maxsockets = min(vcpus, maxtopology.sockets) maxcores = min(vcpus, maxtopology.cores) maxthreads = min(vcpus, maxtopology.threads) if not allow_threads: maxthreads = 1 LOG.debug("Build topologies for %(vcpus)d vcpu(s) " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d", {"vcpus": vcpus, "maxsockets": maxsockets, "maxcores": maxcores, "maxthreads": maxthreads}) # Figure out all possible topologies that match # the required vcpus count and satisfy the declared # limits. If the total vCPU count were very high # it might be more efficient to factorize the vcpu # count and then only iterate over its factors, but # that's overkill right now possible = [] for s in range(1, maxsockets + 1): for c in range(1, maxcores + 1): for t in range(1, maxthreads + 1): if (t * c * s) != vcpus: continue possible.append( objects.VirtCPUTopology(sockets=s, cores=c, threads=t)) # We want to # - Minimize threads (ie larger sockets * cores is best) # - Prefer sockets over cores possible = sorted(possible, reverse=True, key=lambda x: (x.sockets * x.cores, x.sockets, x.threads)) LOG.debug("Got %d possible topologies", len(possible)) if len(possible) == 0: raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus, sockets=maxsockets, cores=maxcores, threads=maxthreads) return possible def _filter_for_numa_threads(possible, wantthreads): """Filter to topologies which closest match to NUMA threads :param possible: list of nova.objects.VirtCPUTopology :param wantthreads: ideal number of threads Determine which topologies provide the closest match to the number of threads desired by the NUMA topology of the instance. The possible topologies may not have any entries which match the desired thread count. So this method will find the topologies which have the closest matching count. ie if wantthreads is 4 and the possible topologies has entries with 6, 3, 2 or 1 threads, it will return the topologies which have 3 threads, as this is the closest match not greater than 4. :returns: list of nova.objects.VirtCPUTopology """ # First figure out the largest available thread # count which is not greater than wantthreads mostthreads = 0 for topology in possible: if topology.threads > wantthreads: continue if topology.threads > mostthreads: mostthreads = topology.threads # Now restrict to just those topologies which # match the largest thread count bestthreads = [] for topology in possible: if topology.threads != mostthreads: continue bestthreads.append(topology) return bestthreads def _sort_possible_cpu_topologies(possible, wanttopology): """Sort the topologies in order of preference :param possible: list of nova.objects.VirtCPUTopology instances :param wanttopology: nova.objects.VirtCPUTopology for preferred topology This takes the list of possible topologies and resorts it such that those configurations which most closely match the preferred topology are first. 
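Each candidate is scored by _score_cpu_topology (one point per matching socket, core and thread count), so 3-point exact matches sort first and 0-point candidates last; within a score, the incoming order of 'possible' is preserved.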
:returns: sorted list of nova.objects.VirtCPUTopology instances """ # Look at possible topologies and score them according # to how well they match the preferred topologies # We don't use python's sort(), since we want to # preserve the sorting done when populating the # 'possible' list originally scores = collections.defaultdict(list) for topology in possible: score = _score_cpu_topology(topology, wanttopology) scores[score].append(topology) # Build list of all possible topologies sorted # by the match score, best match first desired = [] desired.extend(scores[3]) desired.extend(scores[2]) desired.extend(scores[1]) desired.extend(scores[0]) return desired def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True, numa_topology=None): """Get desired CPU topologies according to settings :param flavor: Flavor object to query extra specs from :param image_meta: nova.objects.ImageMeta object instance :param allow_threads: if the hypervisor supports CPU threads :param numa_topology: InstanceNUMATopology object that may contain additional topology constraints (such as threading information) that we should consider Look at the properties set in the flavor extra specs and the image metadata and build up a list of all possible valid CPU topologies that can be used in the guest. Then return this list sorted in order of preference. :returns: sorted list of nova.objects.VirtCPUTopology instances """ LOG.debug("Getting desirable topologies for flavor %(flavor)s " "and image_meta %(image_meta)s, allow threads: %(threads)s", {"flavor": flavor, "image_meta": image_meta, "threads": allow_threads}) preferred, maximum = _get_cpu_topology_constraints(flavor, image_meta) LOG.debug("Topology preferred %(preferred)s, maximum %(maximum)s", {"preferred": preferred, "maximum": maximum}) possible = _get_possible_cpu_topologies(flavor.vcpus, maximum, allow_threads) LOG.debug("Possible topologies %s", possible) if numa_topology: min_requested_threads = None cell_topologies = [cell.cpu_topology for cell in numa_topology.cells if cell.cpu_topology] if cell_topologies: min_requested_threads = min( topo.threads for topo in cell_topologies) if min_requested_threads: if preferred.threads != -1: min_requested_threads = min(preferred.threads, min_requested_threads) specified_threads = max(1, min_requested_threads) LOG.debug("Filtering topologies best for %d threads", specified_threads) possible = _filter_for_numa_threads(possible, specified_threads) LOG.debug("Remaining possible topologies %s", possible) desired = _sort_possible_cpu_topologies(possible, preferred) LOG.debug("Sorted desired topologies %s", desired) return desired def get_best_cpu_topology(flavor, image_meta, allow_threads=True, numa_topology=None): """Get best CPU topology according to settings :param flavor: Flavor object to query extra specs from :param image_meta: nova.objects.ImageMeta object instance :param allow_threads: if the hypervisor supports CPU threads :param numa_topology: InstanceNUMATopology object that may contain additional topology constraints (such as threading information) that we should consider Look at the properties set in the flavor extra specs and the image metadata and build up a list of all possible valid CPU topologies that can be used in the guest. 
Then return the best topology to use :returns: a nova.objects.VirtCPUTopology instance for best topology """ return _get_desirable_cpu_topologies(flavor, image_meta, allow_threads, numa_topology)[0] def _numa_cell_supports_pagesize_request(host_cell, inst_cell): """Determines whether the cell can accept the request. :param host_cell: host cell to fit the instance cell onto :param inst_cell: instance cell we want to fit :raises: exception.MemoryPageSizeNotSupported if custom page size not supported in host cell. :returns: The page size able to be handled by host_cell """ avail_pagesize = [page.size_kb for page in host_cell.mempages] avail_pagesize.sort(reverse=True) def verify_pagesizes(host_cell, inst_cell, avail_pagesize): inst_cell_mem = inst_cell.memory * units.Ki for pagesize in avail_pagesize: if host_cell.can_fit_hugepages(pagesize, inst_cell_mem): return pagesize if inst_cell.pagesize == MEMPAGES_SMALL: return verify_pagesizes(host_cell, inst_cell, avail_pagesize[-1:]) elif inst_cell.pagesize == MEMPAGES_LARGE: return verify_pagesizes(host_cell, inst_cell, avail_pagesize[:-1]) elif inst_cell.pagesize == MEMPAGES_ANY: return verify_pagesizes(host_cell, inst_cell, avail_pagesize) else: return verify_pagesizes(host_cell, inst_cell, [inst_cell.pagesize]) def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id, threads_per_core=1): """Pack an instance onto a set of siblings :param available_siblings: list of sets of CPU id's - available siblings per core :param instance_cell: An instance of objects.InstanceNUMACell describing the pinning requirements of the instance :param threads_per_core: number of threads per core in host's cell :returns: An instance of objects.InstanceNUMACell containing the pinning information, and potentially a new topology to be exposed to the instance. None if there is no valid way to satisfy the sibling requirements for the instance. This method will calculate the pinning for the given instance and it's topology, making sure that hyperthreads of the instance match up with those of the host when the pinning takes effect. Currently the strategy for packing is to prefer siblings and try use cores evenly, by using emptier cores first. This is achieved by the way we order cores in the sibling_sets structure, and the order in which we iterate through it. The main packing loop that iterates over the sibling_sets dictionary will not currently try to look for a fit that maximizes number of siblings, but will simply rely on the iteration ordering and picking the first viable placement. """ # We build up a data structure that answers the question: 'Given the # number of threads I want to pack, give me a list of all the available # sibling sets (or groups thereof) that can accommodate it' sibling_sets = collections.defaultdict(list) for sib in available_siblings: for threads_no in range(1, len(sib) + 1): sibling_sets[threads_no].append(sib) pinning = None threads_no = 1 def _orphans(instance_cell, threads_per_core): """Number of instance CPUs which will not fill up a host core. Best explained by an example: consider set of free host cores as such: [(0, 1), (3, 5), (6, 7, 8)] This would be a case of 2 threads_per_core AKA an entry for 2 in the sibling_sets structure. If we attempt to pack a 5 core instance on it - due to the fact that we iterate the list in order, we will end up with a single core of the instance pinned to a thread "alone" (with id 6), and we would have one 'orphan' vcpu. 
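In this example len(instance_cell) % threads_per_core == 5 % 2 == 1, which is the orphan count this helper returns.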
""" return len(instance_cell) % threads_per_core def _threads(instance_cell, threads_per_core): """Threads to expose to the instance via the VirtCPUTopology. This is calculated by taking the GCD of the number of threads we are considering at the moment, and the number of orphans. An example for instance_cell = 6 threads_per_core = 4 So we can fit the instance as such: [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)] x x x x x x We can't expose 4 threads, as that will not be a valid topology (all cores exposed to the guest have to have an equal number of threads), and 1 would be too restrictive, but we want all threads that guest sees to be on the same physical core, so we take GCD of 4 (max number of threads) and 2 (number of 'orphan' CPUs) and get 2 as the number of threads. """ return fractions.gcd(threads_per_core, _orphans(instance_cell, threads_per_core)) def _get_pinning(threads_no, sibling_set, instance_cores): """Generate a CPU-vCPU pin mapping.""" if threads_no * len(sibling_set) < len(instance_cores): return usable_cores = map(lambda s: list(s)[:threads_no], sibling_set) return zip(sorted(instance_cores), itertools.chain(*usable_cores)) if (instance_cell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.REQUIRE): LOG.debug("Requested 'require' thread policy for %d cores", len(instance_cell)) elif (instance_cell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.PREFER): LOG.debug("Request 'prefer' thread policy for %d cores", len(instance_cell)) elif (instance_cell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.ISOLATE): LOG.debug("Requested 'isolate' thread policy for %d cores", len(instance_cell)) else: LOG.debug("User did not specify a thread policy. Using default " "for %d cores", len(instance_cell)) if (instance_cell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.ISOLATE): # make sure we have at least one fully free core if threads_per_core not in sibling_sets: return pinning = _get_pinning(1, # we only want to "use" one thread per core sibling_sets[threads_per_core], instance_cell.cpuset) else: # NOTE(ndipanov): We iterate over the sibling sets in descending order # of cores that can be packed. This is an attempt to evenly distribute # instances among physical cores for threads_no, sibling_set in sorted( (t for t in sibling_sets.items()), reverse=True): # NOTE(sfinucan): The key difference between the require and # prefer policies is that require will not settle for non-siblings # if this is all that is available. 
Enforce this by ensuring we're # using sibling sets that contain at least one sibling if (instance_cell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.REQUIRE): if threads_no <= 1: continue pinning = _get_pinning(threads_no, sibling_set, instance_cell.cpuset) if pinning: break threads_no = _threads(instance_cell, threads_no) if not pinning: return topology = objects.VirtCPUTopology(sockets=1, cores=len(pinning) / threads_no, threads=threads_no) instance_cell.pin_vcpus(*pinning) instance_cell.cpu_topology = topology instance_cell.id = host_cell_id return instance_cell def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell): """Figure out if cells can be pinned to a host cell and return details :param host_cell: objects.NUMACell instance - the host cell that the instance should be pinned to :param instance_cell: objects.InstanceNUMACell instance without any pinning information :returns: objects.InstanceNUMACell instance with pinning information, or None if instance cannot be pinned to the given host """ if (host_cell.avail_cpus < len(instance_cell.cpuset) or host_cell.avail_memory < instance_cell.memory): # If we do not have enough CPUs available or not enough memory # on the host cell, we quit early (no oversubscription). return if host_cell.siblings: # Try to pack the instance cell onto cores return _pack_instance_onto_cores( host_cell.free_siblings, instance_cell, host_cell.id, max(map(len, host_cell.siblings))) else: # Straightforward to pin to available cpus when there is no # hyperthreading on the host return _pack_instance_onto_cores( [host_cell.free_cpus], instance_cell, host_cell.id) def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None): """Check if an instance cell can fit and set its cell id :param host_cell: host cell to fit the instance cell onto :param instance_cell: instance cell we want to fit :param limit_cell: an objects.NUMATopologyLimit or None Make sure we can fit the instance cell onto a host cell and if so, return a new objects.InstanceNUMACell with the id set to that of the host, or None if the cell exceeds the limits of the host :returns: a new instance cell or None """ # NOTE (ndipanov): do not allow an instance to overcommit against # itself on any NUMA cell if (instance_cell.memory > host_cell.memory or len(instance_cell.cpuset) > len(host_cell.cpuset)): return None if instance_cell.cpu_pinning_requested: new_instance_cell = _numa_fit_instance_cell_with_pinning( host_cell, instance_cell) if not new_instance_cell: return new_instance_cell.pagesize = instance_cell.pagesize instance_cell = new_instance_cell elif limit_cell: memory_usage = host_cell.memory_usage + instance_cell.memory cpu_usage = host_cell.cpu_usage + len(instance_cell.cpuset) cpu_limit = len(host_cell.cpuset) * limit_cell.cpu_allocation_ratio ram_limit = host_cell.memory * limit_cell.ram_allocation_ratio if memory_usage > ram_limit or cpu_usage > cpu_limit: return None pagesize = None if instance_cell.pagesize: pagesize = _numa_cell_supports_pagesize_request( host_cell, instance_cell) if not pagesize: return instance_cell.id = host_cell.id instance_cell.pagesize = pagesize return instance_cell def _numa_get_pagesize_constraints(flavor, image_meta): """Return the requested memory page size :param flavor: a Flavor object to read extra specs from :param image_meta: nova.objects.ImageMeta object instance :raises: MemoryPageSizeInvalid or MemoryPageSizeForbidden :returns: a page size requested or MEMPAGES_* """ def check_and_return_pages_size(request): if request ==
"any": return MEMPAGES_ANY elif request == "large": return MEMPAGES_LARGE elif request == "small": return MEMPAGES_SMALL else: try: request = int(request) except ValueError: try: request = strutils.string_to_bytes( request, return_int=True) / units.Ki except ValueError: request = 0 if request <= 0: raise exception.MemoryPageSizeInvalid(pagesize=request) return request flavor_request = flavor.get('extra_specs', {}).get("hw:mem_page_size", "") image_request = image_meta.properties.get("hw_mem_page_size", "") if not flavor_request and image_request: raise exception.MemoryPageSizeForbidden( pagesize=image_request, against="<empty>") if not flavor_request: # Nothing was specified for hugepages, # let's the default process running. return None pagesize = check_and_return_pages_size(flavor_request) if image_request and (pagesize in (MEMPAGES_ANY, MEMPAGES_LARGE)): return check_and_return_pages_size(image_request) elif image_request: raise exception.MemoryPageSizeForbidden( pagesize=image_request, against=flavor_request) return pagesize def _numa_get_flavor_cpu_map_list(flavor): hw_numa_cpus = [] hw_numa_cpus_set = False extra_specs = flavor.get("extra_specs", {}) for cellid in range(objects.ImageMetaProps.NUMA_NODES_MAX): cpuprop = "hw:numa_cpus.%d" % cellid if cpuprop not in extra_specs: break hw_numa_cpus.append( parse_cpu_spec(extra_specs[cpuprop])) hw_numa_cpus_set = True if hw_numa_cpus_set: return hw_numa_cpus def _numa_get_cpu_map_list(flavor, image_meta): flavor_cpu_list = _numa_get_flavor_cpu_map_list(flavor) image_cpu_list = image_meta.properties.get("hw_numa_cpus", None) if flavor_cpu_list is None: return image_cpu_list else: if image_cpu_list is not None: raise exception.ImageNUMATopologyForbidden( name='hw_numa_cpus') return flavor_cpu_list def _numa_get_flavor_mem_map_list(flavor): hw_numa_mem = [] hw_numa_mem_set = False extra_specs = flavor.get("extra_specs", {}) for cellid in range(objects.ImageMetaProps.NUMA_NODES_MAX): memprop = "hw:numa_mem.%d" % cellid if memprop not in extra_specs: break hw_numa_mem.append(int(extra_specs[memprop])) hw_numa_mem_set = True if hw_numa_mem_set: return hw_numa_mem def _numa_get_mem_map_list(flavor, image_meta): flavor_mem_list = _numa_get_flavor_mem_map_list(flavor) image_mem_list = image_meta.properties.get("hw_numa_mem", None) if flavor_mem_list is None: return image_mem_list else: if image_mem_list is not None: raise exception.ImageNUMATopologyForbidden( name='hw_numa_mem') return flavor_mem_list def _numa_get_constraints_manual(nodes, flavor, cpu_list, mem_list): cells = [] totalmem = 0 availcpus = set(range(flavor.vcpus)) for node in range(nodes): mem = mem_list[node] cpuset = cpu_list[node] for cpu in cpuset: if cpu > (flavor.vcpus - 1): raise exception.ImageNUMATopologyCPUOutOfRange( cpunum=cpu, cpumax=(flavor.vcpus - 1)) if cpu not in availcpus: raise exception.ImageNUMATopologyCPUDuplicates( cpunum=cpu) availcpus.remove(cpu) cells.append(objects.InstanceNUMACell( id=node, cpuset=cpuset, memory=mem)) totalmem = totalmem + mem if availcpus: raise exception.ImageNUMATopologyCPUsUnassigned( cpuset=str(availcpus)) if totalmem != flavor.memory_mb: raise exception.ImageNUMATopologyMemoryOutOfRange( memsize=totalmem, memtotal=flavor.memory_mb) return objects.InstanceNUMATopology(cells=cells) def is_realtime_enabled(flavor): flavor_rt = flavor.get('extra_specs', {}).get("hw:cpu_realtime") return strutils.bool_from_string(flavor_rt) def _get_realtime_mask(flavor, image): """Returns realtime mask based on flavor/image meta""" flavor_mask = 
flavor.get('extra_specs', {}).get("hw:cpu_realtime_mask") image_mask = image.properties.get("hw_cpu_realtime_mask") return image_mask or flavor_mask def vcpus_realtime_topology(vcpus_set, flavor, image): """Partitions vcpus used for realtime and 'normal' vcpus. According to a mask specified from flavor or image, returns set of vcpus configured for realtime scheduler and set running as a 'normal' vcpus. """ mask = _get_realtime_mask(flavor, image) if not mask: raise exception.RealtimeMaskNotFoundOrInvalid() vcpus_spec = format_cpu_spec(vcpus_set) vcpus_rt = parse_cpu_spec(vcpus_spec + ", " + mask) vcpus_em = vcpus_set - vcpus_rt if len(vcpus_rt) < 1 or len(vcpus_em) < 1: raise exception.RealtimeMaskNotFoundOrInvalid() return vcpus_rt, vcpus_em def _numa_get_constraints_auto(nodes, flavor): if ((flavor.vcpus % nodes) > 0 or (flavor.memory_mb % nodes) > 0): raise exception.ImageNUMATopologyAsymmetric() cells = [] for node in range(nodes): ncpus = int(flavor.vcpus / nodes) mem = int(flavor.memory_mb / nodes) start = node * ncpus cpuset = set(range(start, start + ncpus)) cells.append(objects.InstanceNUMACell( id=node, cpuset=cpuset, memory=mem)) return objects.InstanceNUMATopology(cells=cells) def _add_cpu_pinning_constraint(flavor, image_meta, numa_topology): flavor_policy = flavor.get('extra_specs', {}).get('hw:cpu_policy') image_policy = image_meta.properties.get('hw_cpu_policy') if flavor_policy == fields.CPUAllocationPolicy.DEDICATED: cpu_policy = flavor_policy elif flavor_policy == fields.CPUAllocationPolicy.SHARED: if image_policy == fields.CPUAllocationPolicy.DEDICATED: raise exception.ImageCPUPinningForbidden() cpu_policy = flavor_policy elif image_policy == fields.CPUAllocationPolicy.DEDICATED: cpu_policy = image_policy else: cpu_policy = fields.CPUAllocationPolicy.SHARED rt = is_realtime_enabled(flavor) if (rt and cpu_policy != fields.CPUAllocationPolicy.DEDICATED): raise exception.RealtimeConfigurationInvalid() elif rt and not _get_realtime_mask(flavor, image_meta): raise exception.RealtimeMaskNotFoundOrInvalid() flavor_thread_policy = flavor.get('extra_specs', {}).get( 'hw:cpu_thread_policy') image_thread_policy = image_meta.properties.get('hw_cpu_thread_policy') if cpu_policy == fields.CPUAllocationPolicy.SHARED: if flavor_thread_policy or image_thread_policy: raise exception.CPUThreadPolicyConfigurationInvalid() return numa_topology if flavor_thread_policy in [None, fields.CPUThreadAllocationPolicy.PREFER]: cpu_thread_policy = image_thread_policy elif image_thread_policy and image_thread_policy != flavor_thread_policy: raise exception.ImageCPUThreadPolicyForbidden() else: cpu_thread_policy = flavor_thread_policy if numa_topology: for cell in numa_topology.cells: cell.cpu_policy = cpu_policy cell.cpu_thread_policy = cpu_thread_policy else: single_cell = objects.InstanceNUMACell( id=0, cpuset=set(range(flavor.vcpus)), memory=flavor.memory_mb, cpu_policy=cpu_policy, cpu_thread_policy=cpu_thread_policy) numa_topology = objects.InstanceNUMATopology(cells=[single_cell]) return numa_topology # TODO(sahid): Move numa related to hardward/numa.py def numa_get_constraints(flavor, image_meta): """Return topology related to input request :param flavor: Flavor object to read extra specs from :param image_meta: nova.objects.ImageMeta object instance May raise exception.ImageNUMATopologyIncomplete() if the image properties are not correctly specified, or exception.ImageNUMATopologyForbidden if an attempt is made to override flavor settings with image properties. 
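For example, hw:numa_nodes=2 on a flavor with 8 vCPUs and 4096MB of RAM yields two cells of 4 vCPUs and 2048MB each via the automatic split, unless hw:numa_cpus.N / hw:numa_mem.N (or the hw_numa_cpus / hw_numa_mem image properties) define a manual layout.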
:returns: InstanceNUMATopology or None """ nodes = flavor.get('extra_specs', {}).get("hw:numa_nodes") props = image_meta.properties if nodes is not None: if props.obj_attr_is_set("hw_numa_nodes"): raise exception.ImageNUMATopologyForbidden( name='hw_numa_nodes') nodes = int(nodes) else: nodes = props.get("hw_numa_nodes") pagesize = _numa_get_pagesize_constraints( flavor, image_meta) numa_topology = None if nodes or pagesize: nodes = nodes or 1 cpu_list = _numa_get_cpu_map_list(flavor, image_meta) mem_list = _numa_get_mem_map_list(flavor, image_meta) # If one property list is specified both must be if ((cpu_list is None and mem_list is not None) or (cpu_list is not None and mem_list is None)): raise exception.ImageNUMATopologyIncomplete() # If any node has data set, all nodes must have data set if ((cpu_list is not None and len(cpu_list) != nodes) or (mem_list is not None and len(mem_list) != nodes)): raise exception.ImageNUMATopologyIncomplete() if cpu_list is None: numa_topology = _numa_get_constraints_auto( nodes, flavor) else: numa_topology = _numa_get_constraints_manual( nodes, flavor, cpu_list, mem_list) # We currently support same pagesize for all cells. [setattr(c, 'pagesize', pagesize) for c in numa_topology.cells] return _add_cpu_pinning_constraint(flavor, image_meta, numa_topology) def numa_fit_instance_to_host( host_topology, instance_topology, limits=None, pci_requests=None, pci_stats=None): """Fit the instance topology onto the host topology given the limits :param host_topology: objects.NUMATopology object to fit an instance on :param instance_topology: objects.InstanceNUMATopology to be fitted :param limits: objects.NUMATopologyLimits that defines limits :param pci_requests: instance pci_requests :param pci_stats: pci_stats for the host Given a host and instance topology and optionally limits - this method will attempt to fit instance cells onto all permutations of host cells by calling the _numa_fit_instance_cell method, and return a new InstanceNUMATopology with it's cell ids set to host cell id's of the first successful permutation, or None. """ if not (host_topology and instance_topology): LOG.debug("Require both a host and instance NUMA topology to " "fit instance on host.") return elif len(host_topology) < len(instance_topology): LOG.debug("There are not enough free cores on the system to schedule " "the instance correctly. Required: %(required)s, actual: " "%(actual)s", {'required': len(instance_topology), 'actual': len(host_topology)}) return else: # TODO(ndipanov): We may want to sort permutations differently # depending on whether we want packing/spreading over NUMA nodes for host_cell_perm in itertools.permutations( host_topology.cells, len(instance_topology)): cells = [] for host_cell, instance_cell in zip( host_cell_perm, instance_topology.cells): try: got_cell = _numa_fit_instance_cell( host_cell, instance_cell, limits) except exception.MemoryPageSizeNotSupported: # This exception will been raised if instance cell's # custom pagesize is not supported with host cell in # _numa_cell_supports_pagesize_request function. 
break if got_cell is None: break cells.append(got_cell) if len(cells) == len(host_cell_perm): if not pci_requests: return objects.InstanceNUMATopology(cells=cells) elif ((pci_stats is not None) and pci_stats.support_requests(pci_requests, cells)): return objects.InstanceNUMATopology(cells=cells) def _numa_pagesize_usage_from_cell(hostcell, instancecell, sign): topo = [] for pages in hostcell.mempages: if pages.size_kb == instancecell.pagesize: topo.append(objects.NUMAPagesTopology( size_kb=pages.size_kb, total=pages.total, used=max(0, pages.used + instancecell.memory * units.Ki / pages.size_kb * sign))) else: topo.append(pages) return topo def numa_usage_from_instances(host, instances, free=False): """Get host topology usage :param host: objects.NUMATopology with usage information :param instances: list of objects.InstanceNUMATopology :param free: If True usage of the host will be decreased Sum the usage from all @instances to report the overall host topology usage :returns: objects.NUMATopology including usage information """ if host is None: return instances = instances or [] cells = [] sign = -1 if free else 1 for hostcell in host.cells: memory_usage = hostcell.memory_usage cpu_usage = hostcell.cpu_usage newcell = objects.NUMACell( id=hostcell.id, cpuset=hostcell.cpuset, memory=hostcell.memory, cpu_usage=0, memory_usage=0, mempages=hostcell.mempages, pinned_cpus=hostcell.pinned_cpus, siblings=hostcell.siblings) for instance in instances: for instancecell in instance.cells: if instancecell.id == hostcell.id: memory_usage = ( memory_usage + sign * instancecell.memory) cpu_usage_diff = len(instancecell.cpuset) if (instancecell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.ISOLATE and hostcell.siblings): cpu_usage_diff *= max(map(len, hostcell.siblings)) cpu_usage += sign * cpu_usage_diff if instancecell.pagesize and instancecell.pagesize > 0: newcell.mempages = _numa_pagesize_usage_from_cell( hostcell, instancecell, sign) if instance.cpu_pinning_requested: pinned_cpus = set(instancecell.cpu_pinning.values()) if free: if (instancecell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.ISOLATE): newcell.unpin_cpus_with_siblings(pinned_cpus) else: newcell.unpin_cpus(pinned_cpus) else: if (instancecell.cpu_thread_policy == fields.CPUThreadAllocationPolicy.ISOLATE): newcell.pin_cpus_with_siblings(pinned_cpus) else: newcell.pin_cpus(pinned_cpus) newcell.cpu_usage = max(0, cpu_usage) newcell.memory_usage = max(0, memory_usage) cells.append(newcell) return objects.NUMATopology(cells=cells) # TODO(ndipanov): Remove when all code paths are using objects def instance_topology_from_instance(instance): """Convenience method for getting the numa_topology out of instances Since we may get an Instance as either a dict, a db object, or an actual Instance object, this makes sure we get beck either None, or an instance of objects.InstanceNUMATopology class. 
""" if isinstance(instance, obj_instance.Instance): # NOTE (ndipanov): This may cause a lazy-load of the attribute instance_numa_topology = instance.numa_topology else: if 'numa_topology' in instance: instance_numa_topology = instance['numa_topology'] elif 'uuid' in instance: try: instance_numa_topology = ( objects.InstanceNUMATopology.get_by_instance_uuid( context.get_admin_context(), instance['uuid']) ) except exception.NumaTopologyNotFound: instance_numa_topology = None else: instance_numa_topology = None if instance_numa_topology: if isinstance(instance_numa_topology, six.string_types): instance_numa_topology = ( objects.InstanceNUMATopology.obj_from_primitive( jsonutils.loads(instance_numa_topology))) elif isinstance(instance_numa_topology, dict): # NOTE (ndipanov): A horrible hack so that we can use # this in the scheduler, since the # InstanceNUMATopology object is serialized raw using # the obj_base.obj_to_primitive, (which is buggy and # will give us a dict with a list of InstanceNUMACell # objects), and then passed to jsonutils.to_primitive, # which will make a dict out of those objects. All of # this is done by scheduler.utils.build_request_spec # called in the conductor. # # Remove when request_spec is a proper object itself! dict_cells = instance_numa_topology.get('cells') if dict_cells: cells = [objects.InstanceNUMACell( id=cell['id'], cpuset=set(cell['cpuset']), memory=cell['memory'], pagesize=cell.get('pagesize'), cpu_pinning=cell.get('cpu_pinning_raw'), cpu_policy=cell.get('cpu_policy'), cpu_thread_policy=cell.get('cpu_thread_policy')) for cell in dict_cells] instance_numa_topology = objects.InstanceNUMATopology( cells=cells) return instance_numa_topology # TODO(ndipanov): Remove when all code paths are using objects def host_topology_and_format_from_host(host): """Convenience method for getting the numa_topology out of hosts Since we may get a host as either a dict, a db object, or an actual ComputeNode object, or an instance of HostState class, this makes sure we get beck either None, or an instance of objects.NUMATopology class. :returns: A two-tuple, first element is the topology itself or None, second is a boolean set to True if topology was in JSON format. """ was_json = False try: host_numa_topology = host.get('numa_topology') except AttributeError: host_numa_topology = host.numa_topology if host_numa_topology is not None and isinstance( host_numa_topology, six.string_types): was_json = True host_numa_topology = (objects.NUMATopology.obj_from_db_obj( host_numa_topology)) return host_numa_topology, was_json # TODO(ndipanov): Remove when all code paths are using objects def get_host_numa_usage_from_instance(host, instance, free=False, never_serialize_result=False): """Calculate new 'numa_usage' of 'host' from 'instance' NUMA usage This is a convenience method to help us handle the fact that we use several different types throughout the code (ComputeNode and Instance objects, dicts, scheduler HostState) which may have both json and deserialized versions of objects.numa classes. Handles all the complexity without polluting the class method with it. :param host: nova.objects.ComputeNode instance, or a db object or dict :param instance: nova.objects.Instance instance, or a db object or dict :param free: if True the returned topology will have it's usage decreased instead. :param never_serialize_result: if True result will always be an instance of objects.NUMATopology class. 
:returns: numa_usage in the format it was on the host or objects.NUMATopology instance if never_serialize_result was True """ instance_numa_topology = instance_topology_from_instance(instance) if instance_numa_topology: instance_numa_topology = [instance_numa_topology] host_numa_topology, jsonify_result = host_topology_and_format_from_host( host) updated_numa_topology = ( numa_usage_from_instances( host_numa_topology, instance_numa_topology, free=free)) if updated_numa_topology is not None: if jsonify_result and not never_serialize_result: updated_numa_topology = updated_numa_topology._to_json() return updated_numa_topology
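# ---------------------------------------------------------------------------
# Illustrative sketch (not part of nova): the enumeration and preference
# ordering used by _get_possible_cpu_topologies above can be exercised in
# isolation. The namedtuple below is a hypothetical stand-in for
# nova.objects.VirtCPUTopology so the example runs without the nova object
# model; the loop and sort key mirror the real implementation.
import collections

_ExampleTopo = collections.namedtuple(
    '_ExampleTopo', ['sockets', 'cores', 'threads'])


def _example_possible_cpu_topologies(vcpus, maxsockets, maxcores, maxthreads):
    # Clamp the limits to the vcpu count, as the real code does, then keep
    # every sockets*cores*threads factorization that hits vcpus exactly.
    maxsockets = min(vcpus, maxsockets)
    maxcores = min(vcpus, maxcores)
    maxthreads = min(vcpus, maxthreads)
    possible = [_ExampleTopo(s, c, t)
                for s in range(1, maxsockets + 1)
                for c in range(1, maxcores + 1)
                for t in range(1, maxthreads + 1)
                if s * c * t == vcpus]
    # Same ordering rule: minimize threads (larger sockets*cores first),
    # then prefer more sockets over more cores.
    return sorted(possible, reverse=True,
                  key=lambda x: (x.sockets * x.cores, x.sockets, x.threads))

# _example_possible_cpu_topologies(8, 2, 8, 2) returns
# [(2, 4, 1), (1, 8, 1), (2, 2, 2), (1, 4, 2)]: both single-thread layouts
# sort ahead of anything that needs two threads per core.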
zhimin711/nova
nova/virt/hardware.py
Python
apache-2.0
56,084
"""Fallback pure Python implementation of msgpack""" import sys import struct import warnings if sys.version_info[0] == 2: PY2 = True int_types = (int, long) def dict_iteritems(d): return d.iteritems() else: PY2 = False int_types = int unicode = str xrange = range def dict_iteritems(d): return d.items() if sys.version_info < (3, 5): # Ugly hack... RecursionError = RuntimeError def _is_recursionerror(e): return len(e.args) == 1 and isinstance(e.args[0], str) and \ e.args[0].startswith('maximum recursion depth exceeded') else: def _is_recursionerror(e): return True if hasattr(sys, 'pypy_version_info'): # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own # StringBuilder is fastest. from __pypy__ import newlist_hint try: from __pypy__.builders import BytesBuilder as StringBuilder except ImportError: from __pypy__.builders import StringBuilder USING_STRINGBUILDER = True class StringIO(object): def __init__(self, s=b''): if s: self.builder = StringBuilder(len(s)) self.builder.append(s) else: self.builder = StringBuilder() def write(self, s): if isinstance(s, memoryview): s = s.tobytes() elif isinstance(s, bytearray): s = bytes(s) self.builder.append(s) def getvalue(self): return self.builder.build() else: USING_STRINGBUILDER = False from io import BytesIO as StringIO newlist_hint = lambda size: [] from pip._vendor.msgpack.exceptions import ( BufferFull, OutOfData, ExtraData, FormatError, StackError, ) from pip._vendor.msgpack import ExtType EX_SKIP = 0 EX_CONSTRUCT = 1 EX_READ_ARRAY_HEADER = 2 EX_READ_MAP_HEADER = 3 TYPE_IMMEDIATE = 0 TYPE_ARRAY = 1 TYPE_MAP = 2 TYPE_RAW = 3 TYPE_BIN = 4 TYPE_EXT = 5 DEFAULT_RECURSE_LIMIT = 511 def _check_type_strict(obj, t, type=type, tuple=tuple): if type(t) is tuple: return type(obj) in t else: return type(obj) is t def _get_data_from_buffer(obj): try: view = memoryview(obj) except TypeError: # try to use legacy buffer protocol if 2.7, otherwise re-raise if PY2: view = memoryview(buffer(obj)) warnings.warn("using old buffer interface to unpack %s; " "this leads to unpacking errors if slicing is used and " "will be removed in a future version" % type(obj), RuntimeWarning, stacklevel=3) else: raise if view.itemsize != 1: raise ValueError("cannot unpack from multi-byte object") return view def unpack(stream, **kwargs): warnings.warn( "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", DeprecationWarning, stacklevel=2) data = stream.read() return unpackb(data, **kwargs) def unpackb(packed, **kwargs): """ Unpack an object from `packed`. Raises ``ExtraData`` when *packed* contains extra bytes. Raises ``ValueError`` when *packed* is incomplete. Raises ``FormatError`` when *packed* is not valid msgpack. Raises ``StackError`` when *packed* contains too nested. Other exceptions can be raised during unpacking. See :class:`Unpacker` for options. """ unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs) unpacker.feed(packed) try: ret = unpacker._unpack() except OutOfData: raise ValueError("Unpack failed: incomplete input") except RecursionError as e: if _is_recursionerror(e): raise StackError raise if unpacker._got_extradata(): raise ExtraData(ret, unpacker._get_extradata()) return ret if sys.version_info < (2, 7, 6): def _unpack_from(f, b, o=0): """Explicit typcast for legacy struct.unpack_from""" return struct.unpack_from(f, bytes(b), o) else: _unpack_from = struct.unpack_from class Unpacker(object): """Streaming unpacker. arguments: :param file_like: File-like object having `.read(n)` method. 
If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. :param int read_size: Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`) :param bool use_list: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple. (default: True) :param bool raw: If true, unpack msgpack raw to Python bytes (default). Otherwise, unpack to Python str (or unicode on Python 2) by decoding with UTF-8 encoding (recommended). Currently, the default is true, but it will be changed to false in near future. So you must specify it explicitly for keeping backward compatibility. *encoding* option which is deprecated overrides this option. :param bool strict_map_key: If true, only str or bytes are accepted for map (dict) keys. It's False by default for backward-compatibility. But it will be True from msgpack 1.0. :param callable object_hook: When specified, it should be callable. Unpacker calls it with a dict argument after unpacking msgpack map. (See also simplejson) :param callable object_pairs_hook: When specified, it should be callable. Unpacker calls it with a list of key-value pairs after unpacking msgpack map. (See also simplejson) :param str encoding: Encoding used for decoding msgpack raw. If it is None (default), msgpack raw is deserialized to Python bytes. :param str unicode_errors: (deprecated) Used for decoding msgpack raw with *encoding*. (default: `'strict'`) :param int max_buffer_size: Limits size of data waiting unpacked. 0 means system's INT_MAX (default). Raises `BufferFull` exception when it is insufficient. You should set this parameter when unpacking data from untrusted source. :param int max_str_len: Deprecated, use *max_buffer_size* instead. Limits max length of str. (default: max_buffer_size or 1024*1024) :param int max_bin_len: Deprecated, use *max_buffer_size* instead. Limits max length of bin. (default: max_buffer_size or 1024*1024) :param int max_array_len: Limits max length of array. (default: max_buffer_size or 128*1024) :param int max_map_len: Limits max length of map. (default: max_buffer_size//2 or 32*1024) :param int max_ext_len: Deprecated, use *max_buffer_size* instead. Limits max size of ext type. (default: max_buffer_size or 1024*1024) Example of streaming deserialize from file-like object:: unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024) for o in unpacker: process(o) Example of streaming deserialize from socket:: unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024) while True: buf = sock.recv(1024**2) if not buf: break unpacker.feed(buf) for o in unpacker: process(o) Raises ``ExtraData`` when *packed* contains extra bytes. Raises ``OutOfData`` when *packed* is incomplete. Raises ``FormatError`` when *packed* is not valid msgpack. Raises ``StackError`` when *packed* contains too nested. Other exceptions can be raised during unpacking. 
""" def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, strict_map_key=False, object_hook=None, object_pairs_hook=None, list_hook=None, encoding=None, unicode_errors=None, max_buffer_size=0, ext_hook=ExtType, max_str_len=-1, max_bin_len=-1, max_array_len=-1, max_map_len=-1, max_ext_len=-1): if encoding is not None: warnings.warn( "encoding is deprecated, Use raw=False instead.", DeprecationWarning, stacklevel=2) if unicode_errors is None: unicode_errors = 'strict' if file_like is None: self._feeding = True else: if not callable(file_like.read): raise TypeError("`file_like.read` must be callable") self.file_like = file_like self._feeding = False #: array of bytes fed. self._buffer = bytearray() #: Which position we currently reads self._buff_i = 0 # When Unpacker is used as an iterable, between the calls to next(), # the buffer is not "consumed" completely, for efficiency sake. # Instead, it is done sloppily. To make sure we raise BufferFull at # the correct moments, we have to keep track of how sloppy we were. # Furthermore, when the buffer is incomplete (that is: in the case # we raise an OutOfData) we need to rollback the buffer to the correct # state, which _buf_checkpoint records. self._buf_checkpoint = 0 if max_str_len == -1: max_str_len = max_buffer_size or 1024*1024 if max_bin_len == -1: max_bin_len = max_buffer_size or 1024*1024 if max_array_len == -1: max_array_len = max_buffer_size or 128*1024 if max_map_len == -1: max_map_len = max_buffer_size//2 or 32*1024 if max_ext_len == -1: max_ext_len = max_buffer_size or 1024*1024 self._max_buffer_size = max_buffer_size or 2**31-1 if read_size > self._max_buffer_size: raise ValueError("read_size must be smaller than max_buffer_size") self._read_size = read_size or min(self._max_buffer_size, 16*1024) self._raw = bool(raw) self._strict_map_key = bool(strict_map_key) self._encoding = encoding self._unicode_errors = unicode_errors self._use_list = use_list self._list_hook = list_hook self._object_hook = object_hook self._object_pairs_hook = object_pairs_hook self._ext_hook = ext_hook self._max_str_len = max_str_len self._max_bin_len = max_bin_len self._max_array_len = max_array_len self._max_map_len = max_map_len self._max_ext_len = max_ext_len self._stream_offset = 0 if list_hook is not None and not callable(list_hook): raise TypeError('`list_hook` is not callable') if object_hook is not None and not callable(object_hook): raise TypeError('`object_hook` is not callable') if object_pairs_hook is not None and not callable(object_pairs_hook): raise TypeError('`object_pairs_hook` is not callable') if object_hook is not None and object_pairs_hook is not None: raise TypeError("object_pairs_hook and object_hook are mutually " "exclusive") if not callable(ext_hook): raise TypeError("`ext_hook` is not callable") def feed(self, next_bytes): assert self._feeding view = _get_data_from_buffer(next_bytes) if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size): raise BufferFull # Strip buffer before checkpoint before reading file. if self._buf_checkpoint > 0: del self._buffer[:self._buf_checkpoint] self._buff_i -= self._buf_checkpoint self._buf_checkpoint = 0 # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython self._buffer.extend(view) def _consume(self): """ Gets rid of the used parts of the buffer. 
""" self._stream_offset += self._buff_i - self._buf_checkpoint self._buf_checkpoint = self._buff_i def _got_extradata(self): return self._buff_i < len(self._buffer) def _get_extradata(self): return self._buffer[self._buff_i:] def read_bytes(self, n): return self._read(n) def _read(self, n): # (int) -> bytearray self._reserve(n) i = self._buff_i self._buff_i = i+n return self._buffer[i:i+n] def _reserve(self, n): remain_bytes = len(self._buffer) - self._buff_i - n # Fast path: buffer has n bytes already if remain_bytes >= 0: return if self._feeding: self._buff_i = self._buf_checkpoint raise OutOfData # Strip buffer before checkpoint before reading file. if self._buf_checkpoint > 0: del self._buffer[:self._buf_checkpoint] self._buff_i -= self._buf_checkpoint self._buf_checkpoint = 0 # Read from file remain_bytes = -remain_bytes while remain_bytes > 0: to_read_bytes = max(self._read_size, remain_bytes) read_data = self.file_like.read(to_read_bytes) if not read_data: break assert isinstance(read_data, bytes) self._buffer += read_data remain_bytes -= len(read_data) if len(self._buffer) < n + self._buff_i: self._buff_i = 0 # rollback raise OutOfData def _read_header(self, execute=EX_CONSTRUCT): typ = TYPE_IMMEDIATE n = 0 obj = None self._reserve(1) b = self._buffer[self._buff_i] self._buff_i += 1 if b & 0b10000000 == 0: obj = b elif b & 0b11100000 == 0b11100000: obj = -1 - (b ^ 0xff) elif b & 0b11100000 == 0b10100000: n = b & 0b00011111 typ = TYPE_RAW if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) elif b == 0xc0: obj = None elif b == 0xc2: obj = False elif b == 0xc3: obj = True elif b == 0xc4: typ = TYPE_BIN self._reserve(1) n = self._buffer[self._buff_i] self._buff_i += 1 if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc5: typ = TYPE_BIN self._reserve(2) n = _unpack_from(">H", self._buffer, self._buff_i)[0] self._buff_i += 2 if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc6: typ = TYPE_BIN self._reserve(4) n = _unpack_from(">I", self._buffer, self._buff_i)[0] self._buff_i += 4 if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc7: # ext 8 typ = TYPE_EXT self._reserve(2) L, n = _unpack_from('Bb', self._buffer, self._buff_i) self._buff_i += 2 if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xc8: # ext 16 typ = TYPE_EXT self._reserve(3) L, n = _unpack_from('>Hb', self._buffer, self._buff_i) self._buff_i += 3 if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xc9: # ext 32 typ = TYPE_EXT self._reserve(5) L, n = _unpack_from('>Ib', self._buffer, self._buff_i) self._buff_i += 5 if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xca: self._reserve(4) obj = _unpack_from(">f", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 
0xcb: self._reserve(8) obj = _unpack_from(">d", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xcc: self._reserve(1) obj = self._buffer[self._buff_i] self._buff_i += 1 elif b == 0xcd: self._reserve(2) obj = _unpack_from(">H", self._buffer, self._buff_i)[0] self._buff_i += 2 elif b == 0xce: self._reserve(4) obj = _unpack_from(">I", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 0xcf: self._reserve(8) obj = _unpack_from(">Q", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xd0: self._reserve(1) obj = _unpack_from("b", self._buffer, self._buff_i)[0] self._buff_i += 1 elif b == 0xd1: self._reserve(2) obj = _unpack_from(">h", self._buffer, self._buff_i)[0] self._buff_i += 2 elif b == 0xd2: self._reserve(4) obj = _unpack_from(">i", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 0xd3: self._reserve(8) obj = _unpack_from(">q", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xd4: # fixext 1 typ = TYPE_EXT if self._max_ext_len < 1: raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) self._reserve(2) n, obj = _unpack_from("b1s", self._buffer, self._buff_i) self._buff_i += 2 elif b == 0xd5: # fixext 2 typ = TYPE_EXT if self._max_ext_len < 2: raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) self._reserve(3) n, obj = _unpack_from("b2s", self._buffer, self._buff_i) self._buff_i += 3 elif b == 0xd6: # fixext 4 typ = TYPE_EXT if self._max_ext_len < 4: raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) self._reserve(5) n, obj = _unpack_from("b4s", self._buffer, self._buff_i) self._buff_i += 5 elif b == 0xd7: # fixext 8 typ = TYPE_EXT if self._max_ext_len < 8: raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) self._reserve(9) n, obj = _unpack_from("b8s", self._buffer, self._buff_i) self._buff_i += 9 elif b == 0xd8: # fixext 16 typ = TYPE_EXT if self._max_ext_len < 16: raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) self._reserve(17) n, obj = _unpack_from("b16s", self._buffer, self._buff_i) self._buff_i += 17 elif b == 0xd9: typ = TYPE_RAW self._reserve(1) n = self._buffer[self._buff_i] self._buff_i += 1 if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b == 0xda: typ = TYPE_RAW self._reserve(2) n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b == 0xdb: typ = TYPE_RAW self._reserve(4) n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b == 0xdc: typ = TYPE_ARRAY self._reserve(2) n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b == 0xdd: typ = TYPE_ARRAY self._reserve(4) n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b == 0xde: self._reserve(2) n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_map_len: raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP elif b == 0xdf: self._reserve(4) n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > 
self._max_map_len:
                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
            typ = TYPE_MAP
        else:
            raise FormatError("Unknown header: 0x%x" % b)
        return typ, n, obj

    def _unpack(self, execute=EX_CONSTRUCT):
        typ, n, obj = self._read_header(execute)

        if execute == EX_READ_ARRAY_HEADER:
            if typ != TYPE_ARRAY:
                raise ValueError("Expected array")
            return n
        if execute == EX_READ_MAP_HEADER:
            if typ != TYPE_MAP:
                raise ValueError("Expected map")
            return n
        # TODO should we eliminate the recursion?
        if typ == TYPE_ARRAY:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call `list_hook`
                    self._unpack(EX_SKIP)
                return
            ret = newlist_hint(n)
            for i in xrange(n):
                ret.append(self._unpack(EX_CONSTRUCT))
            if self._list_hook is not None:
                ret = self._list_hook(ret)
            # TODO is the interaction between `list_hook` and `use_list` ok?
            return ret if self._use_list else tuple(ret)
        if typ == TYPE_MAP:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call hooks
                    self._unpack(EX_SKIP)
                    self._unpack(EX_SKIP)
                return
            if self._object_pairs_hook is not None:
                ret = self._object_pairs_hook(
                    (self._unpack(EX_CONSTRUCT),
                     self._unpack(EX_CONSTRUCT))
                    for _ in xrange(n))
            else:
                ret = {}
                for _ in xrange(n):
                    key = self._unpack(EX_CONSTRUCT)
                    if self._strict_map_key and type(key) not in (unicode, bytes):
                        raise ValueError("%s is not allowed for map key" % str(type(key)))
                    ret[key] = self._unpack(EX_CONSTRUCT)
                if self._object_hook is not None:
                    ret = self._object_hook(ret)
            return ret
        if execute == EX_SKIP:
            return
        if typ == TYPE_RAW:
            if self._encoding is not None:
                obj = obj.decode(self._encoding, self._unicode_errors)
            elif self._raw:
                obj = bytes(obj)
            else:
                obj = obj.decode('utf_8')
            return obj
        if typ == TYPE_EXT:
            return self._ext_hook(n, bytes(obj))
        if typ == TYPE_BIN:
            return bytes(obj)
        assert typ == TYPE_IMMEDIATE
        return obj

    def __iter__(self):
        return self

    def __next__(self):
        try:
            ret = self._unpack(EX_CONSTRUCT)
            self._consume()
            return ret
        except OutOfData:
            self._consume()
            raise StopIteration
        except RecursionError:
            raise StackError

    next = __next__

    def skip(self):
        self._unpack(EX_SKIP)
        self._consume()

    def unpack(self):
        try:
            ret = self._unpack(EX_CONSTRUCT)
        except RecursionError:
            raise StackError
        self._consume()
        return ret

    def read_array_header(self):
        ret = self._unpack(EX_READ_ARRAY_HEADER)
        self._consume()
        return ret

    def read_map_header(self):
        ret = self._unpack(EX_READ_MAP_HEADER)
        self._consume()
        return ret

    def tell(self):
        return self._stream_offset


class Packer(object):
    """
    MessagePack Packer

    usage:

        packer = Packer()
        astream.write(packer.pack(a))
        astream.write(packer.pack(b))

    Packer's constructor has some keyword arguments:

    :param callable default:
        Convert user type to builtin type that Packer supports.
        See also simplejson's document.

    :param bool use_single_float:
        Use single precision float type for float. (default: False)

    :param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.

    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables str8 type for unicode.

    :param bool strict_types:
        If set to true, types will be checked to be exact. Derived classes
        from serializable types will not be serialized and will be
        treated as unsupported type and forwarded to default.
        Additionally tuples will not be serialized as lists.
        This is useful when trying to implement accurate serialization
        for python types.

    :param str encoding:
        (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')

    :param str unicode_errors:
        Error handler for encoding unicode. (default: 'strict')
    """
    def __init__(self, default=None, encoding=None, unicode_errors=None,
                 use_single_float=False, autoreset=True, use_bin_type=False,
                 strict_types=False):
        if encoding is None:
            encoding = 'utf_8'
        else:
            warnings.warn(
                "encoding is deprecated, Use raw=False instead.",
                DeprecationWarning, stacklevel=2)

        if unicode_errors is None:
            unicode_errors = 'strict'

        self._strict_types = strict_types
        self._use_float = use_single_float
        self._autoreset = autoreset
        self._use_bin_type = use_bin_type
        self._encoding = encoding
        self._unicode_errors = unicode_errors
        self._buffer = StringIO()
        if default is not None:
            if not callable(default):
                raise TypeError("default must be callable")
        self._default = default

    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT,
              check=isinstance, check_type_strict=_check_type_strict):
        default_used = False
        if self._strict_types:
            check = check_type_strict
            list_types = list
        else:
            list_types = (list, tuple)
        while True:
            if nest_limit < 0:
                raise ValueError("recursion limit exceeded")
            if obj is None:
                return self._buffer.write(b"\xc0")
            if check(obj, bool):
                if obj:
                    return self._buffer.write(b"\xc3")
                return self._buffer.write(b"\xc2")
            if check(obj, int_types):
                if 0 <= obj < 0x80:
                    return self._buffer.write(struct.pack("B", obj))
                if -0x20 <= obj < 0:
                    return self._buffer.write(struct.pack("b", obj))
                if 0x80 <= obj <= 0xff:
                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
                if -0x80 <= obj < 0:
                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
                if 0xff < obj <= 0xffff:
                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
                if -0x8000 <= obj < -0x80:
                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
                if 0xffff < obj <= 0xffffffff:
                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
                if -0x80000000 <= obj < -0x8000:
                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
                if 0xffffffff < obj <= 0xffffffffffffffff:
                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
                if -0x8000000000000000 <= obj < -0x80000000:
                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
                if not default_used and self._default is not None:
                    obj = self._default(obj)
                    default_used = True
                    continue
                raise OverflowError("Integer value out of range")
            if check(obj, (bytes, bytearray)):
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("%s is too large" % type(obj).__name__)
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, unicode):
                if self._encoding is None:
                    raise TypeError(
                        "Can't encode unicode string: "
                        "no encoding is specified")
                obj = obj.encode(self._encoding, self._unicode_errors)
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("String is too large")
                self._pack_raw_header(n)
                return self._buffer.write(obj)
            if check(obj, memoryview):
                n = len(obj) * obj.itemsize
                if n >= 2**32:
                    raise ValueError("Memoryview is too large")
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, float):
                if self._use_float:
                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
            if check(obj, ExtType):
                code = obj.code
                data = obj.data
                assert isinstance(code, int)
                assert isinstance(data, bytes)
                L = len(data)
                if L == 1:
                    self._buffer.write(b'\xd4')
                elif L == 2:
                    self._buffer.write(b'\xd5')
                elif L == 4:
                    self._buffer.write(b'\xd6')
                elif L == 8:
                    self._buffer.write(b'\xd7')
                elif L == 16:
                    self._buffer.write(b'\xd8')
                elif L <= 0xff:
                    self._buffer.write(struct.pack(">BB", 0xc7, L))
                elif L <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xc8, L))
                else:
                    self._buffer.write(struct.pack(">BI", 0xc9, L))
                self._buffer.write(struct.pack("b", code))
                self._buffer.write(data)
                return
            if check(obj, list_types):
                n = len(obj)
                self._pack_array_header(n)
                for i in xrange(n):
                    self._pack(obj[i], nest_limit - 1)
                return
            if check(obj, dict):
                return self._pack_map_pairs(len(obj), dict_iteritems(obj),
                                            nest_limit - 1)
            if not default_used and self._default is not None:
                obj = self._default(obj)
                default_used = 1
                continue
            raise TypeError("Cannot serialize %r" % (obj, ))

    def pack(self, obj):
        try:
            self._pack(obj)
        except:
            self._buffer = StringIO()  # force reset
            raise
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_pairs(self, pairs):
        self._pack_map_pairs(len(pairs), pairs)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_array_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_array_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_map_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_ext_type(self, typecode, data):
        if not isinstance(typecode, int):
            raise TypeError("typecode must have int type.")
        if not 0 <= typecode <= 127:
            raise ValueError("typecode should be 0-127")
        if not isinstance(data, bytes):
            raise TypeError("data must have bytes type")
        L = len(data)
        if L > 0xffffffff:
            raise ValueError("Too large data")
        if L == 1:
            self._buffer.write(b'\xd4')
        elif L == 2:
            self._buffer.write(b'\xd5')
        elif L == 4:
            self._buffer.write(b'\xd6')
        elif L == 8:
            self._buffer.write(b'\xd7')
        elif L == 16:
            self._buffer.write(b'\xd8')
        elif L <= 0xff:
            self._buffer.write(b'\xc7' + struct.pack('B', L))
        elif L <= 0xffff:
            self._buffer.write(b'\xc8' + struct.pack('>H', L))
        else:
            self._buffer.write(b'\xc9' + struct.pack('>I', L))
        self._buffer.write(struct.pack('B', typecode))
        self._buffer.write(data)

    def _pack_array_header(self, n):
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x90 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xdc, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdd, n))
        raise ValueError("Array is too large")

    def _pack_map_header(self, n):
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x80 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xde, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdf, n))
        raise ValueError("Dict is too large")

    def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
        self._pack_map_header(n)
        for (k, v) in pairs:
            self._pack(k, nest_limit - 1)
            self._pack(v, nest_limit - 1)

    def _pack_raw_header(self, n):
        if n <= 0x1f:
            self._buffer.write(struct.pack('B', 0xa0 + n))
        elif self._use_bin_type and n <= 0xff:
            self._buffer.write(struct.pack('>BB', 0xd9, n))
        elif n <= 0xffff:
            self._buffer.write(struct.pack(">BH", 0xda, n))
        elif n <= 0xffffffff:
            self._buffer.write(struct.pack(">BI", 0xdb, n))
        else:
            raise ValueError('Raw is too large')

    def _pack_bin_header(self, n):
        if not self._use_bin_type:
            return self._pack_raw_header(n)
        elif n <= 0xff:
            return self._buffer.write(struct.pack('>BB', 0xc4, n))
        elif n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xc5, n))
        elif n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xc6, n))
        else:
            raise ValueError('Bin is too large')

    def bytes(self):
        """Return internal buffer contents as bytes object"""
        return self._buffer.getvalue()

    def reset(self):
        """Reset internal buffer.

        This method is useful only when autoreset=False.
        """
        self._buffer = StringIO()

    def getbuffer(self):
        """Return view of internal buffer."""
        if USING_STRINGBUILDER or PY2:
            return memoryview(self.bytes())
        else:
            return self._buffer.getbuffer()
xyuanmu/XX-Net
python3.8.2/Lib/site-packages/pip/_vendor/msgpack/fallback.py
Python
bsd-2-clause
37,491
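A minimal round-trip sketch for the Packer/Unpacker pair above. The import path assumes the standalone msgpack distribution (pip vendors the same module under pip._vendor.msgpack); the constructor keywords used here are the ones visible in this version of the code.

from io import BytesIO
from msgpack.fallback import Packer, Unpacker

packer = Packer(use_bin_type=True)
buf = BytesIO()
buf.write(packer.pack({'id': 1, 'tags': ['a', 'b']}))
buf.write(packer.pack([1.5, None, True]))

buf.seek(0)
unpacker = Unpacker(buf, raw=False)
for obj in unpacker:   # iteration drives _unpack(EX_CONSTRUCT) once per message
    print(obj)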
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: [email protected]
# Date: 2015-04-06

import datetime
import logging

from sofa.pbrpc import client
from galaxy import master_pb2
from galaxy import galaxy_pb2
from common import util

LOG = logging.getLogger('console')


class BaseEntity(object):

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    def __getattr__(self, name):
        return self.__dict__.get(name, None)


class GalaxySDK(object):
    """
    Galaxy python sdk
    """

    def __init__(self, master_addr):
        self.channel = client.Channel(master_addr)

    def get_pods(self, jobid):
        controller = client.Controller()
        controller.SetTimeout(5)
        master = master_pb2.Master_Stub(self.channel)
        request = master_pb2.ShowPodRequest()
        request.jobid = jobid
        response = master.ShowPod(controller, request)
        if response.status != galaxy_pb2.kOk:
            LOG.error("failed to get pods")
            return [], False
        pods = []
        for pod in response.pods:
            new_pod = util.pb2dict(pod)
            new_pod["stage"] = galaxy_pb2.PodStage.Name(pod.stage)
            new_pod["state"] = galaxy_pb2.PodState.Name(pod.state)
            pods.append(new_pod)
        return pods, True

    def get_all_job(self):
        controller = client.Controller()
        controller.SetTimeout(5)
        master = master_pb2.Master_Stub(self.channel)
        request = master_pb2.ListJobsRequest()
        response = master.ListJobs(controller, request)
        return response.jobs, True

    def get_real_time_status(self):
        controller = client.Controller()
        controller.SetTimeout(5)
        master = master_pb2.Master_Stub(self.channel)
        request = master_pb2.GetMasterStatusRequest()
        response = master.GetStatus(controller, request)
        return response
imotai/galaxy
platform/src/galaxy/sdk.py
Python
bsd-3-clause
2,033
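A hedged usage sketch for the SDK above; the import path follows the record's file location, and the master address and job id are invented placeholders.

from galaxy.sdk import GalaxySDK

sdk = GalaxySDK('127.0.0.1:8102')            # placeholder master address
pods, ok = sdk.get_pods('job_20150406_001')  # placeholder job id
if ok:
    for pod in pods:
        print(pod['stage'], pod['state'])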
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('base', '0035_remove_deleted_field'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='has_changed',
            field=models.BooleanField(default=False),
        ),
    ]
participedia/pontoon
pontoon/base/migrations/0036_project_has_changed.py
Python
bsd-3-clause
408
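For context, a sketch of the model state this migration produces; everything other than has_changed is assumed.

# pontoon/base/models.py (sketch; other fields elided)
from django.db import models

class Project(models.Model):
    # ... existing fields ...
    has_changed = models.BooleanField(default=False)

Applying it is the usual `python manage.py migrate base`.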
from __future__ import print_function, absolute_import, division

import six
import numpy as np
import struct
import warnings
import string

from astropy import log
from astropy.io import registry as io_registry

from ..spectral_cube import BaseSpectralCube
from .fits import load_fits_cube

"""
.. TODO::
    When any section length is zero, that means the following values are to
    be ignored.  No warning is needed.
"""

# Constant:
r2deg = 180 / np.pi

# see sicfits.f90
_ctype_dict = {'LII': 'GLON',
               'BII': 'GLAT',
               'VELOCITY': 'VELO',
               'RA': 'RA',
               'DEC': 'DEC',
               'FREQUENCY': 'FREQ',
               }
_cunit_dict = {'LII': 'deg',
               'BII': 'deg',
               'VELOCITY': 'km s-1',
               'RA': 'deg',
               'DEC': 'deg',
               'FREQUENCY': 'MHz',
               }
cel_types = ('RA', 'DEC', 'GLON', 'GLAT')

# CLASS apparently defaults to an ARC (zenithal equidistant) projection; this
# is what is output in case the projection
# is zero when exporting from CLASS
_proj_dict = {0: 'ARC', 1: 'TAN', 2: 'SIN', 3: 'AZP', 4: 'STG', 5: 'ZEA',
              6: 'AIT', 7: 'GLS', 8: 'SFL', }

_bunit_dict = {'k (tmb)': 'K'}


def is_lmv(origin, filepath, fileobj, *args, **kwargs):
    """
    Determine whether input is in GILDAS CLASS lmv format
    """
    return filepath is not None and filepath.lower().endswith('.lmv')


def read_lmv(lf):
    """
    Read an LMV cube file

    Specification is primarily in GILDAS image_def.f90
    """
    log.warning("CLASS LMV cube reading is tentatively supported.  "
                "Please post bug reports at the first sign of danger!")

    # lf for "LMV File"
    filetype = _read_string(lf, 12)
    #!---------------------------------------------------------------------
    #! @ private
    #!       SYCODE system code
    #!       '-'    IEEE
    #!       '.'    EEEI (IBM like)
    #!       '_'    VAX
    #!       IMCODE file code
    #!       '<'    IEEE  64 bits (Little Endian, 99.9 % of recent computers)
    #!       '>'    EEEI  64 bits (Big Endian, HPUX, IBM-RISC, and SPARC ...)
    #!---------------------------------------------------------------------
    imcode = filetype[6]
    if filetype[:6] != 'GILDAS' or filetype[7:] != 'IMAGE':
        raise TypeError("File is not a GILDAS Image file")

    if imcode in ('<', '>'):
        if imcode == '>':
            log.warning("Swap the endianness first...")
        return read_lmv_type2(lf)
    else:
        return read_lmv_type1(lf)


def read_lmv_type1(lf):
    header = {}
    # fmt probably matters!  Default is "r4", i.e. float32 data, but could be float64
    fmt = np.fromfile(lf, dtype='int32', count=1)  # 4

    # number of data blocks
    ndb = np.fromfile(lf, dtype='int32', count=1)  # 5
    gdf_type = np.fromfile(lf, dtype='int32', count=1)  # 6
    # Reserved Space
    reserved_fill = np.fromfile(lf, dtype='int32', count=4)  # 7
    general_section_length = np.fromfile(lf, dtype='int32', count=1)  # 11
    #print "Format: ",fmt," ndb: ",ndb, " fill: ",fill," other: ",unknown

    # pos 12
    naxis, naxis1, naxis2, naxis3, naxis4 = np.fromfile(lf, count=5, dtype='int32')
    header['NAXIS'] = naxis
    header['NAXIS1'] = naxis1
    header['NAXIS2'] = naxis2
    header['NAXIS3'] = naxis3
    header['NAXIS4'] = naxis4

    # We are indexing bytes from here; CLASS indices are higher by 12
    # pos 17
    header['CRPIX1'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CRVAL1'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CDELT1'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg
    header['CRPIX2'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CRVAL2'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CDELT2'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg
    header['CRPIX3'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CRVAL3'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CDELT3'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CRPIX4'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CRVAL4'] = np.fromfile(lf, count=1, dtype='float64')[0]
    header['CDELT4'] = np.fromfile(lf, count=1, dtype='float64')[0]
    # pos 41
    #print "Post-crval",lf.tell()
    blank_section_length = np.fromfile(lf, count=1, dtype='int32')
    if blank_section_length != 8:
        warnings.warn("Invalid section length found for blanking section")
    bval = np.fromfile(lf, count=1, dtype='float32')[0]  # 42
    header['TOLERANC'] = np.fromfile(lf, count=1, dtype='int32')[0]  # 43 eval = tolerance
    extrema_section_length = np.fromfile(lf, count=1, dtype='int32')[0]  # 44
    if extrema_section_length != 40:
        warnings.warn("Invalid section length found for extrema section")
    vmin, vmax = np.fromfile(lf, count=2, dtype='float32')  # 45
    xmin, xmax, ymin, ymax, zmin, zmax = np.fromfile(lf, count=6, dtype='int32')  # 47
    wmin, wmax = np.fromfile(lf, count=2, dtype='int32')  # 53
    description_section_length = np.fromfile(lf, count=1, dtype='int32')[0]  # 55
    if description_section_length != 72:
        warnings.warn("Invalid section length found for description section")
    #strings = lf.read(description_section_length)  # 56
    header['BUNIT'] = _read_string(lf, 12)  # 56
    header['CTYPE1'] = _read_string(lf, 12)  # 59
    header['CTYPE2'] = _read_string(lf, 12)  # 62
    header['CTYPE3'] = _read_string(lf, 12)  # 65
    header['CTYPE4'] = _read_string(lf, 12)  # 68
    header['CUNIT1'] = _cunit_dict[header['CTYPE1'].strip()]
    header['CUNIT2'] = _cunit_dict[header['CTYPE2'].strip()]
    header['CUNIT3'] = _cunit_dict[header['CTYPE3'].strip()]
    header['COOSYS'] = _read_string(lf, 12)  # 71
    position_section_length = np.fromfile(lf, count=1, dtype='int32')  # 74
    if position_section_length != 48:
        warnings.warn("Invalid section length found for position section")
    header['OBJNAME'] = _read_string(lf, 4 * 3)  # 75
    header['RA'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg  # 78
    header['DEC'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg  # 80
    header['GLON'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg  # 82
    header['GLAT'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg  # 84
    header['EQUINOX'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 86
    header['PROJWORD'] = _read_string(lf, 4)  # 87
    header['PTYP'] = np.fromfile(lf, count=1, dtype='int32')[0]  # 88
    header['A0'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 89
    header['D0'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 91
    header['PANG'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 93
    header['XAXI'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 95
    header['YAXI'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 96
    spectroscopy_section_length = np.fromfile(lf, count=1, dtype='int32')  # 97
    if spectroscopy_section_length != 48:
        warnings.warn("Invalid section length found for spectroscopy section")
    header['RECVR'] = _read_string(lf, 12)  # 98
    header['FRES'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 101
    header['IMAGFREQ'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 103 "FIMA"
    header['REFFREQ'] = np.fromfile(lf, count=1, dtype='float64')[0]  # 105
    header['VRES'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 107
    header['VOFF'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 108
    header['FAXI'] = np.fromfile(lf, count=1, dtype='int32')[0]  # 109
    resolution_section_length = np.fromfile(lf, count=1, dtype='int32')[0]  # 110
    if resolution_section_length != 12:
        warnings.warn("Invalid section length found for resolution section")
    #header['DOPP'] = np.fromfile(lf,count=1,dtype='float16')[0]  # 110a ???
    #header['VTYP'] = np.fromfile(lf,count=1,dtype='int16')[0]  # 110b
    # integer, parameter :: vel_unk = 0  ! Unsupported referential :: planetary...)
    # integer, parameter :: vel_lsr = 1  ! LSR referential
    # integer, parameter :: vel_hel = 2  ! Heliocentric referential
    # integer, parameter :: vel_obs = 3  ! Observatory referential
    # integer, parameter :: vel_ear = 4  ! Earth-Moon barycenter referential
    # integer, parameter :: vel_aut = -1 ! Take referential from data
    header['BMAJ'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 111
    header['BMIN'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 112
    header['BPA'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 113
    noise_section_length = np.fromfile(lf, count=1, dtype='int32')
    if noise_section_length != 0:
        warnings.warn("Invalid section length found for noise section")
    header['NOISE'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 115
    header['RMS'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 116
    astrometry_section_length = np.fromfile(lf, count=1, dtype='int32')
    if astrometry_section_length != 0:
        warnings.warn("Invalid section length found for astrometry section")
    header['MURA'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 118
    header['MUDEC'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 119
    header['PARALLAX'] = np.fromfile(lf, count=1, dtype='float32')[0]  # 120

    # Apparently CLASS headers aren't required to fill the 'value at
    # reference pixel' column
    if (header['CTYPE1'].strip() == 'RA' and header['CRVAL1'] == 0 and
            header['RA'] != 0):
        header['CRVAL1'] = header['RA']
        header['CRVAL2'] = header['DEC']

    # Copied from the type 2 reader:
    # Use the appropriate projection type
    ptyp = header['PTYP']
    for kw in header:
        if 'CTYPE' in kw:
            if header[kw].strip() in cel_types:
                n_dashes = 5 - len(header[kw].strip())
                header[kw] = header[kw].strip() + '-' * n_dashes + _proj_dict[ptyp]

    other_info = np.fromfile(lf, count=7, dtype='float32')  # 121-end
    if not np.all(other_info == 0):
        warnings.warn("Found additional information in the last 7 bytes")

    endpoint = 508
    if lf.tell() != endpoint:
        raise ValueError("Header was not parsed correctly")

    data = np.fromfile(lf, count=naxis1 * naxis2 * naxis3, dtype='float32')
    data[data == bval] = np.nan

    # for no apparent reason, y and z are 1-indexed and x is zero-indexed
    if (wmin - 1, zmin - 1, ymin - 1, xmin) != np.unravel_index(np.nanargmin(data), [naxis4, naxis3, naxis2, naxis1]):
        warnings.warn("Data min location does not match that on file.  "
                      "Possible error reading data.")
    if (wmax - 1, zmax - 1, ymax - 1, xmax) != np.unravel_index(np.nanargmax(data), [naxis4, naxis3, naxis2, naxis1]):
        warnings.warn("Data max location does not match that on file.  "
                      "Possible error reading data.")
    if np.nanmax(data) != vmax:
        warnings.warn("Data max does not match that on file.  "
                      "Possible error reading data.")
    if np.nanmin(data) != vmin:
        warnings.warn("Data min does not match that on file.  "
                      "Possible error reading data.")

    return data.reshape([naxis4, naxis3, naxis2, naxis1]), header
    # debug
    #return data.reshape([naxis3,naxis2,naxis1]), header, hdr_f, hdr_s, hdr_i, hdr_d, hdr_d_2


def read_lmv_tofits(fileobj):
    from astropy.io import fits
    data, header = read_lmv(fileobj)
    # LMV may contain extra dimensions that are improperly labeled
    data = data.squeeze()
    bad_kws = ['NAXIS4', 'CRVAL4', 'CRPIX4', 'CDELT4', 'CROTA4', 'CUNIT4', 'CTYPE4']

    cards = [fits.header.Card(keyword=k, value=v[0], comment=v[1])
             if isinstance(v, tuple) else
             fits.header.Card(''.join(s for s in k if s in string.printable),
                              ''.join(s for s in v if s in string.printable)
                              if isinstance(v, six.string_types) else v)
             for k, v in six.iteritems(header) if k not in bad_kws]
    Header = fits.Header(cards)
    hdu = fits.PrimaryHDU(data=data, header=Header)
    return hdu


def load_lmv_cube(fileobj, target_cls=None, use_dask=None):
    hdu = read_lmv_tofits(fileobj)
    meta = {'filename': fileobj.name}
    return load_fits_cube(hdu, meta=meta, use_dask=use_dask)


def _read_byte(f):
    '''Read a single byte (from idlsave)'''
    return np.uint8(struct.unpack('=B', f.read(4)[:1])[0])


def _read_int16(f):
    '''Read a signed 16-bit integer (from idlsave)'''
    return np.int16(struct.unpack('=h', f.read(4)[2:4])[0])


def _read_int32(f):
    '''Read a signed 32-bit integer (from idlsave)'''
    return np.int32(struct.unpack('=i', f.read(4))[0])


def _read_int64(f):
    '''Read a signed 64-bit integer'''
    return np.int64(struct.unpack('=q', f.read(8))[0])


def _read_float32(f):
    '''Read a 32-bit float (from idlsave)'''
    return np.float32(struct.unpack('=f', f.read(4))[0])


def _read_string(f, size):
    '''Read a string of known maximum length'''
    return f.read(size).decode('utf-8').strip()


def _read_float64(f):
    '''Read a 64-bit float (from idlsave)'''
    return np.float64(struct.unpack('=d', f.read(8))[0])


def _check_val(name, got, expected):
    if got != expected:
        log.warning("{2} = {0} instead of {1}".format(got, expected, name))


def read_lmv_type2(lf):
    """ See image_def.f90 """
    header = {}
    lf.seek(12)  # DONE before
    # integer(kind=4) :: ijtyp(3) = 0  ! 1  Image Type

    # fmt probably matters!  Default is "r4", i.e. float32 data, but could be float64
    fmt = _read_int32(lf)  # 4
    # number of data blocks
    ndb = _read_int64(lf)  # 5
    nhb = _read_int32(lf)  # 7
    ntb = _read_int32(lf)  # 8
    version_gdf = _read_int32(lf)  # 9
    if version_gdf != 20:
        raise TypeError("Trying to read a version-2 file, but the version"
                        " number is {0} (should be 20)".format(version_gdf))
    type_gdf = _read_int32(lf)  # 10
    dim_start = _read_int32(lf)  # 11
    pad_trail = _read_int32(lf)  # 12

    if dim_start % 2 == 0:
        log.warning("Got even dim_start in lmv cube: this is not expected.")
    if dim_start > 17:
        log.warning("dim_start > 17 in lmv cube: this is not expected.")

    lf.seek(16 * 4)
    gdf_maxdims = 7
    dim_words = _read_int32(lf)  # 17
    if dim_words != 2 * gdf_maxdims + 2:
        log.warning("dim_words = {0} instead of {1}".format(dim_words, gdf_maxdims * 2 + 2))
    blan_start = _read_int32(lf)  # 18
    if blan_start != dim_start + dim_words + 2:
        log.warning("blan_start = {0} instead of {1}".format(blan_start, dim_start + dim_words + 2))
    mdim = _read_int32(lf)  # 19
    ndim = _read_int32(lf)  # 20
    dims = np.fromfile(lf, count=gdf_maxdims, dtype='int64')
    if np.count_nonzero(dims) != ndim:
        raise ValueError("Disagreement between ndims and number of nonzero dims.")
    header['NAXIS'] = ndim
    valid_dims = []
    for ii, dim in enumerate(dims):
        if dim != 0:
            header['NAXIS{0}'.format(ii + 1)] = dim
            valid_dims.append(ii)

    blan_words = _read_int32(lf)
    if blan_words != 2:
        log.warning("blan_words = {0} instead of 2".format(blan_words))
    extr_start = _read_int32(lf)
    bval = _read_float32(lf)  # blanking value
    bval_tol = _read_float32(lf)  # eval = tolerance

    # FITS requires integer BLANKs
    #header['BLANK'] = bval

    extr_words = _read_int32(lf)
    if extr_words != 6:
        log.warning("extr_words = {0} instead of 6".format(extr_words))
    coor_start = _read_int32(lf)
    if coor_start != extr_start + extr_words + 2:
        log.warning("coor_start = {0} instead of {1}".format(coor_start, extr_start + extr_words + 2))
    rmin = _read_float32(lf)
    rmax = _read_float32(lf)

    # position 168
    minloc = _read_int64(lf)
    maxloc = _read_int64(lf)

    # lf.seek(184)
    coor_words = _read_int32(lf)
    if coor_words != gdf_maxdims * 6:
        log.warning("coor_words = {0} instead of {1}".format(coor_words, gdf_maxdims * 6))
    desc_start = _read_int32(lf)
    if desc_start != coor_start + coor_words + 2:
        log.warning("desc_start = {0} instead of {1}".format(desc_start, coor_start + coor_words + 2))
    convert = np.fromfile(lf, count=3 * gdf_maxdims, dtype='float64').reshape([gdf_maxdims, 3])
    # conversion of "convert" to CRPIX/CRVAL/CDELT below

    desc_words = _read_int32(lf)
    if desc_words != 3 * (gdf_maxdims + 1):
        log.warning("desc_words = {0} instead of {1}".format(desc_words, 3 * (gdf_maxdims + 1)))
    null_start = _read_int32(lf)
    if null_start != desc_start + desc_words + 2:
        log.warning("null_start = {0} instead of {1}".format(null_start, desc_start + desc_words + 2))
    ijuni = _read_string(lf, 12)  # data unit
    ijcode = [_read_string(lf, 12) for ii in range(gdf_maxdims)]
    pad_desc = _read_int32(lf)

    if ijuni.lower() in _bunit_dict:
        header['BUNIT'] = (_bunit_dict[ijuni.lower()], ijuni)
    else:
        header['BUNIT'] = ijuni

    #! The first block length is thus
    #!      s_dim-1 + (2*mdim+4) + (4) + (8) + (6*mdim+2) + (3*mdim+5)
    #! = s_dim-1 + mdim*(2+6+3) + (4+4+2+5+8)
    #! = s_dim-1 + 11*mdim + 23
    #! With mdim = 7, s_dim=11, this is 110 spaces
    #! With mdim = 8, s_dim=11, this is 121 spaces
    #! MDIM > 8 would NOT fit in one block...
    #!
    #! Block 2: Ancillary information
    #!
    #! The same logic of Length + Pointer is used there too, although the
    #! length are fixed. Note rounding to even number for the pointer offsets
    #! in order to preserve alignement...
    #!
    lf.seek(512)
    posi_words = _read_int32(lf)
    _check_val('posi_words', posi_words, 15)
    proj_start = _read_int32(lf)
    source_name = _read_string(lf, 12)
    header['OBJECT'] = source_name
    coordinate_system = _read_string(lf, 12)
    header['RA'] = _read_float64(lf)
    header['DEC'] = _read_float64(lf)
    header['LII'] = _read_float64(lf)
    header['BII'] = _read_float64(lf)
    header['EPOCH'] = _read_float32(lf)
    #pad_posi = _read_float32(lf)
    #print pad_posi
    #raise ValueError("pad_posi should probably be 0?")

    #! PROJECTION
    #integer(kind=4) :: proj_words = 9  ! Projection length: 9 used + 1 padding
    #integer(kind=4) :: spec_start !! = proj_start + 12
    #real(kind=8) :: a0 = 0.d0   ! 89 X of projection center
    #real(kind=8) :: d0 = 0.d0   ! 91 Y of projection center
    #real(kind=8) :: pang = 0.d0 ! 93 Projection angle
    #integer(kind=4) :: ptyp = p_none ! 88 Projection type (see p_... codes)
    #integer(kind=4) :: xaxi = 0 ! 95 X axis
    #integer(kind=4) :: yaxi = 0 ! 96 Y axis
    #integer(kind=4) :: pad_proj
    #!
    proj_words = _read_int32(lf)
    spec_start = _read_int32(lf)
    _check_val('spec_start', spec_start, proj_start + proj_words + 2)
    if proj_words == 9:
        header['PROJ_A0'] = _read_float64(lf)
        header['PROJ_D0'] = _read_float64(lf)
        header['PROJPANG'] = _read_float64(lf)
        ptyp = _read_int32(lf)
        header['PROJXAXI'] = _read_int32(lf)
        header['PROJYAXI'] = _read_int32(lf)
    elif proj_words != 0:
        raise ValueError("Invalid # of projection keywords")

    for kw in header:
        if 'CTYPE' in kw:
            if header[kw].strip() in cel_types:
                n_dashes = 5 - len(header[kw].strip())
                header[kw] = header[kw].strip() + '-' * n_dashes + _proj_dict[ptyp]

    for ii, ((ref, val, inc), code) in enumerate(zip(convert, ijcode)):
        if ii in valid_dims:
            # jul14a gio/to_imfits.f90 line 284-313
            if ptyp != 0 and (ii + 1) in (header['PROJXAXI'], header['PROJYAXI']):
                #! Compute reference pixel so that VAL(REF) = 0
                ref = ref - val / inc
                if (ii + 1) == header['PROJXAXI']:
                    val = header['PROJ_A0']
                elif (ii + 1) == header['PROJYAXI']:
                    val = header['PROJ_D0']
                else:
                    raise ValueError("Impossible state - code bug.")
                val = val * r2deg
                inc = inc * r2deg
                rota = r2deg * header['PROJPANG']
            elif code in ('RA', 'L', 'B', 'DEC', 'LII', 'BII', 'GLAT', 'GLON', 'LAT', 'LON'):
                val = val * r2deg
                inc = inc * r2deg
                rota = 0.0
            # These are not implemented: prefer to maintain original units (we're
            # reading in to spectral_cube after all, no need to change units until the
            # output step)
            #elseif (code.eq.'FREQUENCY') then
            #val = val*1.0d6         ! MHz to Hz
            #inc = inc*1.0d6
            #elseif (code.eq.'VELOCITY') then
            #code = 'VRAD'           ! force VRAD instead of VELOCITY for CASA
            #val = val*1.0d3         ! km/s to m/s
            #inc = inc*1.0d3
            header['CRPIX{0}'.format(ii + 1)] = ref
            header['CRVAL{0}'.format(ii + 1)] = val
            header['CDELT{0}'.format(ii + 1)] = inc

    for ii, ctype in enumerate(ijcode):
        if ii in valid_dims:
            header['CTYPE{0}'.format(ii + 1)] = _ctype_dict[ctype]
            header['CUNIT{0}'.format(ii + 1)] = _cunit_dict[ctype]

    spec_words = _read_int32(lf)
    reso_start = _read_int32(lf)
    _check_val('reso_start', reso_start, proj_start + proj_words + 2 + spec_words + 2)
    if spec_words == 14:
        header['FRES'] = _read_float64(lf)
        header['FIMA'] = _read_float64(lf)
        header['FREQ'] = _read_float64(lf)
        header['VRES'] = _read_float32(lf)
        header['VOFF'] = _read_float32(lf)
        header['DOPP'] = _read_float32(lf)
        header['FAXI'] = _read_int32(lf)
        header['LINENAME'] = _read_string(lf, 12)
        header['VTYPE'] = _read_int32(lf)
    elif spec_words != 0:
        raise ValueError("Invalid # of spectroscopic keywords")

    #! SPECTROSCOPY
    #integer(kind=4) :: spec_words = 14 ! Spectroscopy length: 14 used
    #integer(kind=4) :: reso_start !! = spec_words + 16
    #real(kind=8) :: fres = 0.d0 !101 Frequency resolution
    #real(kind=8) :: fima = 0.d0 !103 Image frequency
    #real(kind=8) :: freq = 0.d0 !105 Rest Frequency
    #real(kind=4) :: vres = 0.0  !107 Velocity resolution
    #real(kind=4) :: voff = 0.0  !108 Velocity offset
    #real(kind=4) :: dopp = 0.0  ! Doppler factor
    #integer(kind=4) :: faxi = 0 !109 Frequency axis
    #integer(kind=4) :: ijlin(3) = 0   ! 98 Line name
    #integer(kind=4) :: vtyp = vel_unk ! Velocity type (see vel_... codes)

    reso_words = _read_int32(lf)
    nois_start = _read_int32(lf)
    _check_val('nois_start', nois_start, proj_start + proj_words + 2 + spec_words + 2 + reso_words + 2)
    if reso_words == 3:
        header['BMAJ'] = _read_float32(lf)
        header['BMIN'] = _read_float32(lf)
        header['BPA'] = _read_float32(lf)
        #pad_reso = _read_float32(lf)
    elif reso_words != 0:
        raise ValueError("Invalid # of resolution keywords")

    #! RESOLUTION
    #integer(kind=4) :: reso_words = 3 ! Resolution length: 3 used + 1 padding
    #integer(kind=4) :: nois_start !! = reso_words + 6
    #real(kind=4) :: majo = 0.0 !111 Major axis
    #real(kind=4) :: mino = 0.0 !112 Minor axis
    #real(kind=4) :: posa = 0.0 !113 Position angle
    #real(kind=4) :: pad_reso

    nois_words = _read_int32(lf)
    astr_start = _read_int32(lf)
    _check_val('astr_start', astr_start, proj_start + proj_words + 2 + spec_words + 2 + reso_words + 2 + nois_words + 2)
    if nois_words == 2:
        header['NOISE_T'] = (_read_float32(lf), "Theoretical Noise")
        header['NOISERMS'] = (_read_float32(lf), "Measured (RMS) noise")
    elif nois_words != 0:
        raise ValueError("Invalid # of noise keywords")

    #! NOISE
    #integer(kind=4) :: nois_words = 2 ! Noise section length: 2 used
    #integer(kind=4) :: astr_start !! = s_nois + 4
    #real(kind=4) :: noise = 0.0 ! 115 Theoretical noise
    #real(kind=4) :: rms = 0.0   ! 116 Actual noise

    astr_words = _read_int32(lf)
    uvda_start = _read_int32(lf)
    _check_val('uvda_start', uvda_start, proj_start + proj_words + 2 + spec_words + 2 + reso_words + 2 + nois_words + 2 + astr_words + 2)
    if astr_words == 3:
        header['MURA'] = _read_float32(lf)
        header['MUDEC'] = _read_float32(lf)
        header['PARALLAX'] = _read_float32(lf)
    elif astr_words != 0:
        raise ValueError("Invalid # of astrometry keywords")

    #! ASTROMETRY
    #integer(kind=4) :: astr_words = 3 ! Proper motion section length: 3 used + 1 padding
    #integer(kind=4) :: uvda_start !! = s_astr + 4
    #real(kind=4) :: mura = 0.0     ! 118 along RA, in mas/yr
    #real(kind=4) :: mudec = 0.0    ! 119 along Dec, in mas/yr
    #real(kind=4) :: parallax = 0.0 ! 120 in mas
    #real(kind=4) :: pad_astr
    #! real(kind=4) :: pepoch = 2000.0 ! 121 in yrs ?

    code_uvt_last = 25
    uvda_words = _read_int32(lf)
    void_start = _read_int32(lf)
    _check_val('void_start', void_start, proj_start + proj_words + 2 +
               spec_words + 2 + reso_words + 2 + nois_words + 2 +
               astr_words + 2 + uvda_words + 2)
    if uvda_words == 18 + 2 * code_uvt_last:
        version_uv = _read_int32(lf)
        nchan = _read_int32(lf)
        nvisi = _read_int64(lf)
        nstokes = _read_int32(lf)
        natom = _read_int32(lf)
        basemin = _read_float32(lf)
        basemax = _read_float32(lf)
        fcol = _read_int32(lf)
        lcol = _read_int32(lf)
        nlead = _read_int32(lf)
        ntrail = _read_int32(lf)
        column_pointer = np.fromfile(lf, count=code_uvt_last, dtype='int32')
        column_size = np.fromfile(lf, count=code_uvt_last, dtype='int32')
        column_codes = np.fromfile(lf, count=nlead + ntrail, dtype='int32')
        column_types = np.fromfile(lf, count=nlead + ntrail, dtype='int32')
        order = _read_int32(lf)
        nfreq = _read_int32(lf)
        atoms = np.fromfile(lf, count=4, dtype='int32')
    elif uvda_words != 0:
        raise ValueError("Invalid # of UV data keywords")

    #! UV_DATA information
    #integer(kind=4) :: uvda_words = 18+2*code_uvt_last ! Length of section: 14 used
    #integer(kind=4) :: void_start !! = s_uvda + l_uvda + 2
    #integer(kind=4) :: version_uv = code_version_uvt_current ! 1 version number. Will allow us to change the data format
    #integer(kind=4) :: nchan = 0    ! 2 Number of channels
    #integer(kind=8) :: nvisi = 0    ! 3-4 Independent of the transposition status
    #integer(kind=4) :: nstokes = 0  ! 5 Number of polarizations
    #integer(kind=4) :: natom = 0    ! 6. 3 for real, imaginary, weight. 1 for real.
    #real(kind=4)    :: basemin = 0. ! 7 Minimum Baseline
    #real(kind=4)    :: basemax = 0. ! 8 Maximum Baseline
    #integer(kind=4) :: fcol         ! 9 Column of first channel
    #integer(kind=4) :: lcol         ! 10 Column of last channel
    #! The number of information per channel can be obtained by
    #! (lcol-fcol+1)/(nchan*natom)
    #! so this could allow to derive the number of Stokes parameters
    #! Leading data at start of each visibility contains specific information
    #integer(kind=4) :: nlead = 7    ! 11 Number of leading informations (at lest 7)
    #! Trailing data at end of each visibility may hold additional information
    #integer(kind=4) :: ntrail = 0   ! 12 Number of trailing informations
    #!
    #! Leading / Trailing information codes have been specified before
    #integer(kind=4) :: column_pointer(code_uvt_last) = code_null ! Back pointer to the columns...
    #integer(kind=4) :: column_size(code_uvt_last) = 0 ! Number of columns for each
    #! In the data, we instead have the codes for each column
    #! integer(kind=4) :: column_codes(nlead+ntrail)         ! Start column for each ...
    #! integer(kind=4) :: column_types(nlead+ntrail) /0,1,2/ ! Number of columns for each: 1 real*4, 2 real*8
    #! Leading / Trailing information codes
    #!
    #integer(kind=4) :: order = 0 ! 13 Stoke/Channel ordering
    #integer(kind=4) :: nfreq = 0 ! 14 ! 0 or = nchan*nstokes
    #integer(kind=4) :: atoms(4)  ! 15-18 Atom description
    #!
    #real(kind=8), pointer :: freqs(:) => null()     ! (nchan*nstokes) = 0d0
    #integer(kind=4), pointer :: stokes(:) => null() ! (nchan*nstokes) or (nstokes) = code_stoke
    #!
    #real(kind=8), pointer :: ref(:) => null()
    #real(kind=8), pointer :: val(:) => null()
    #real(kind=8), pointer :: inc(:) => null()

    lf.seek(1024)
    real_dims = dims[:ndim]
    data = np.fromfile(lf, count=np.product(real_dims), dtype='float32').reshape(real_dims[::-1])
    data[data == bval] = np.nan
    return data, header


io_registry.register_reader('lmv', BaseSpectralCube, load_lmv_cube)
io_registry.register_reader('class_lmv', BaseSpectralCube, load_lmv_cube)
io_registry.register_identifier('lmv', BaseSpectralCube, is_lmv)
radio-astro-tools/spectral-cube
spectral_cube/io/class_lmv.py
Python
bsd-3-clause
29,674
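A hedged usage sketch: importing this module registers the 'lmv' reader and identifier with astropy's I/O registry, after which SpectralCube.read can load GILDAS CLASS cubes. The file name below is a placeholder.

from spectral_cube import SpectralCube

cube = SpectralCube.read('m51_co.lmv')  # format inferred from the .lmv suffix via is_lmv
print(cube.shape, cube.header['BUNIT'])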
""" Github Enterprise OAuth2 backend, docs at: https://python-social-auth.readthedocs.io/en/latest/backends/github_enterprise.html """ from six.moves.urllib.parse import urljoin from ..utils import append_slash from .github import GithubOAuth2, GithubOrganizationOAuth2, \ GithubTeamOAuth2 class GithubEnterpriseMixin(object): def api_url(self): return append_slash(self.setting('API_URL')) def authorization_url(self): return self._url('login/oauth/authorize') def access_token_url(self): return self._url('login/oauth/access_token') def _url(self, path): return urljoin(append_slash(self.setting('URL')), path) class GithubEnterpriseOAuth2(GithubEnterpriseMixin, GithubOAuth2): """Github Enterprise OAuth authentication backend""" name = 'github-enterprise' class GithubEnterpriseOrganizationOAuth2(GithubEnterpriseMixin, GithubOrganizationOAuth2): """Github Enterprise OAuth2 authentication backend for organizations""" name = 'github-enterprise-org' DEFAULT_SCOPE = ['read:org'] class GithubEnterpriseTeamOAuth2(GithubEnterpriseMixin, GithubTeamOAuth2): """Github Enterprise OAuth2 authentication backend for teams""" name = 'github-enterprise-team' DEFAULT_SCOPE = ['read:org']
tobias47n9e/social-core
social_core/backends/github_enterprise.py
Python
bsd-3-clause
1,344
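A hedged wiring sketch for a Django settings module: the SOCIAL_AUTH_GITHUB_ENTERPRISE_* names follow social-core's setting('URL')/setting('API_URL') lookups above; the host and credentials are placeholders.

AUTHENTICATION_BACKENDS = (
    'social_core.backends.github_enterprise.GithubEnterpriseOAuth2',
)
SOCIAL_AUTH_GITHUB_ENTERPRISE_URL = 'https://ghe.example.com/'
SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL = 'https://ghe.example.com/api/v3/'
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY = '...client id...'
SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET = '...client secret...'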
#http://documen.tician.de/pyopencl/
import pyopencl as cl
import numpy as np
import struct

import timing
timings = timing.Timing()

#ctx = cl.create_some_context()
mf = cl.mem_flags


class Bitonic:
    def __init__(self, max_elements, cta_size, dtype):
        plat = cl.get_platforms()[0]
        device = plat.get_devices()[0]
        self.ctx = cl.Context(devices=[device])
        self.queue = cl.CommandQueue(self.ctx, device)

        self.loadProgram()

        self.uintsz = dtype.itemsize
        self.d_tempKeys = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
        self.d_tempValues = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)

    def loadProgram(self):
        self.local_size_limit = 512
        options = "-D LOCAL_SIZE_LIMIT=%d" % (self.local_size_limit,)
        print "build bitonic"
        f = open("bitonic.cl", 'r')
        fstr = "".join(f.readlines())
        self.bitonic_prg = cl.Program(self.ctx, fstr).build(options=options)

    def factorRadix2(self, L):
        if not L:
            log2L = 0
            return log2L, 0
        else:
            #for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
            log2L = 0
            while (L & 1) == 0:
                L >>= 1
                log2L += 1
            return log2L, L

    @timings("Bitonic Sort")
    def sort(self, num, keys, values, batch=1, direction=1):
        print "bitonic sort"
        #num must be a power of 2 and <= max_num
        log2l, remainder = self.factorRadix2(num)
        if remainder != 1:
            return

        #self.keys = keys
        #self.values = values
        self.keys = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=keys)
        self.values = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=values)
        self.queue.finish()

        direction = (direction != 0)
        array_length = keys.size
        print "array_length", array_length

        if array_length < self.local_size_limit:
            self.local(array_length, direction)
        else:
            self.local1(batch, array_length, direction)

            size = 2 * self.local_size_limit
            while size <= array_length:
                stride = size / 2
                while stride > 0:
                    print "size, stride", size, stride
                    if stride >= self.local_size_limit:
                        self.merge_global(batch, array_length, stride, size, direction)
                    else:
                        self.merge_local(batch, array_length, size, stride, direction)
                        break
                    stride >>= 1
                size <<= 1

        self.queue.finish()
        #need to copy back
        cl.enqueue_copy_buffer(self.queue, self.d_tempKeys, self.keys).wait()
        cl.enqueue_copy_buffer(self.queue, self.d_tempValues, self.values).wait()
        self.queue.finish()

        #copy to cpu to view results
        cl.enqueue_read_buffer(self.queue, self.keys, keys)
        cl.enqueue_read_buffer(self.queue, self.values, values)
        self.queue.finish()
        #cl.enqueue_read_buffer(self.queue, self.d_tempKeys, keys).wait()
        #cl.enqueue_read_buffer(self.queue, self.d_tempValues, values).wait()
        return keys, values

    @timings("Bitonic: merge global")
    def merge_global(self, batch, array_length, stride, size, direction):
        local_size = None
        global_size = (batch * array_length / 2,)
        merge_global_args = (self.d_tempKeys,
                             self.d_tempValues,
                             self.d_tempKeys,
                             self.d_tempValues,
                             np.int32(array_length),
                             np.int32(size),
                             np.int32(stride),
                             np.int32(direction))
        self.bitonic_prg.bitonicMergeGlobal(self.queue, global_size, local_size, *(merge_global_args)).wait()
        #self.queue.finish()

    @timings("Bitonic: merge local")
    def merge_local(self, batch, array_length, size, stride, direction):
        local_size = (self.local_size_limit / 2,)
        global_size = (batch * array_length / 2,)
        merge_local_args = (self.d_tempKeys,
                            self.d_tempValues,
                            self.d_tempKeys,
                            self.d_tempValues,
                            np.int32(array_length),
                            np.int32(stride),
                            np.int32(size),
                            np.int32(direction))
        self.bitonic_prg.bitonicMergeLocal(self.queue, global_size, local_size, *(merge_local_args)).wait()
        self.queue.finish()

    @timings("Bitonic: local1 ")
    def local1(self, batch, array_length, direction):
        local_size = (self.local_size_limit / 2,)
        global_size = (batch * array_length / 2,)
        #print global_size, local_size
        local1_args = (self.d_tempKeys,
                       self.d_tempValues,
                       self.keys,
                       self.values)
        #self.bitonic_prg.bitonicSortLocal1(self.queue, global_size, local_size, *(local1_args)).wait()
        self.bitonic_prg.bitonicSortLocal1(self.queue, global_size, local_size,
                                           self.d_tempKeys, self.d_tempValues,
                                           self.keys, self.values).wait()
        self.queue.finish()

    @timings("Bitonic: local ")
    def local(self, array_length, direction, batch=1):
        # batch factor for the small-array path; a single batch sorts one array
        local_size = (self.local_size_limit / 2,)
        global_size = (batch * array_length / 2,)
        local_args = (self.d_tempKeys,
                      self.d_tempValues,
                      self.keys,
                      self.values,
                      np.int32(array_length),
                      np.int32(direction))
        self.bitonic_prg.bitonicSortLocal(self.queue, global_size, local_size, *(local_args)).wait()
        self.queue.finish()


if __name__ == "__main__":
    #These tests wont work as is since class was restructured to fit in with sph
    n = 1048576 * 2
    #n = 32768*2
    #n = 16384
    #n = 8192
    hashes = np.ndarray((n,), dtype=np.uint32)
    indices = np.ndarray((n,), dtype=np.uint32)
    print "hashes size", hashes.size

    import sys
    for i in xrange(0, n):
        hashes[i] = 1 * sys.maxint  #n - i
        indices[i] = i

    fh = [597, 598, 598, 599, 599, 597, 598, 598, 599, 599,
          613, 614, 614, 615, 615, 613, 614, 614, 615, 615]
    for i, f in enumerate(fh):
        hashes[i] = f

    npsorted = np.sort(hashes, 0)
    print "hashes before:", hashes[0:25].T
    print "indices before: ", indices[0:25].T

    bitonic = Bitonic(n, 128, hashes.dtype)
    #num_to_sort = 32768
    num_to_sort = n
    shashes, sindices = bitonic.sort(num_to_sort, hashes, indices)

    #read from buffer
    """
    hashes = numpy.ndarray((num_to_sort,), dtype=numpy.int32)
    cl.enqueue_read_buffer(clsystem.queue, .sort_hashes, hashes)
    print "hashes"
    print hashes.T
    indices = numpy.ndarray((self.num,), dtype=numpy.int32)
    cl.enqueue_read_buffer(self.queue, self.sort_indices, indices)
    print "indices"
    print indices.T
    """

    print "hashes after:", shashes[0:25].T
    #print "sorted hashes:", npsorted[0:20].T
    print "indices after: ", sindices[0:25].T
    print np.linalg.norm(shashes - npsorted)
    print timings
vbud/adventures_in_opencl
experiments/bitonic/bitonic.py
Python
mit
7,569
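A reference sketch (not from the repo) of the same size/stride schedule the OpenCL kernels run, written in pure NumPy for an ascending power-of-two sort; handy for checking kernel output on small arrays.

import numpy as np

def bitonic_sort_reference(keys):
    a = keys.copy()
    n = a.size
    assert n & (n - 1) == 0, 'length must be a power of two'
    size = 2
    while size <= n:               # outer loop, as in sort()
        stride = size // 2
        while stride > 0:          # inner loop over comparison distances
            for i in range(n):
                j = i ^ stride     # comparison partner
                if j > i:
                    ascending = (i & size) == 0
                    if (a[i] > a[j]) == ascending:
                        a[i], a[j] = a[j], a[i]
            stride //= 2
        size *= 2
    return a

# e.g. bitonic_sort_reference(np.array([3, 1, 4, 2], dtype=np.uint32))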
# This is a sample file, and shows the basic framework for using an "Object" based
# document, rather than a "filename" based document.
# This is referenced by the Pythonwin .html documentation.

# In the example below, the OpenObject() method is used instead of OpenDocumentFile,
# and all the core MFC document open functionality is retained.

import win32ui
from pywin.mfc import docview


class object_template(docview.DocTemplate):
    def __init__(self):
        docview.DocTemplate.__init__(self, None, None, None, object_view)

    def OpenObject(self, object):
        # Use this instead of OpenDocumentFile.
        # Look for existing open document
        for doc in self.GetDocumentList():
            print("document is ", doc)
            if doc.object is object:
                doc.GetFirstView().ActivateFrame()
                return doc
        # not found - new one.
        doc = object_document(self, object)
        frame = self.CreateNewFrame(doc)
        doc.OnNewDocument()
        doc.SetTitle(str(object))
        self.InitialUpdateFrame(frame, doc)
        return doc


class object_document(docview.Document):
    def __init__(self, template, object):
        docview.Document.__init__(self, template)
        self.object = object

    def OnOpenDocument(self, name):
        raise RuntimeError("Should not be called if template strings set up correctly")
        return 0


class object_view(docview.EditView):
    def OnInitialUpdate(self):
        self.ReplaceSel("Object is %s" % repr(self.GetDocument().object))


def demo():
    t = object_template()
    d = t.OpenObject(win32ui)
    return (t, d)


if __name__ == '__main__':
    import demoutils
    if demoutils.NeedGoodGUI():
        demo()
sserrot/champion_relationships
venv/Lib/site-packages/pythonwin/pywin/Demos/objdoc.py
Python
mit
1,556
from __future__ import print_function

import optparse

parser = optparse.OptionParser()
parser.add_option('-s', '--strand', type='choice',
                  choices=['D', 'R', 'both'], default='D',
                  help="Strand ('D', 'R' or 'both')")
options, args = parser.parse_args()
if len(args) != 2:
    parser.error('Specify 2 input file paths')

with open(args[1], 'r') as snpfile:
    snps = []
    for line in snpfile:
        cells = line.rstrip('\n').split('\t')
        snps.append((cells[0], int(cells[1])))

with open(args[0], 'r') as rsat_out_file:
    for line in rsat_out_file:
        line = line.rstrip('\n')
        if line.startswith('#'):
            continue
        cells = line.split('\t')
        ft_type = cells[1]
        strand = cells[3]
        if ft_type != 'site' or \
                (options.strand != 'both' and strand != options.strand):
            continue
        gene = cells[0]
        start = int(cells[4])
        stop = int(cells[5])
        if start * stop < 0:
            raise ValueError("start and stop have different sign in line: %s" % line)
        if start < 0:
            seqrange = range(-stop, -start + 1)
        else:
            seqrange = range(start, stop + 1)
        for (snp, pos) in snps:
            if snp == gene and pos in seqrange:
                print(line)
                break
TGAC/tgac-galaxytools
tools/rsat_filter_snps/rsat_filter_snps.py
Python
mit
1,369
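A hedged invocation sketch (file names invented): the script takes the RSAT feature output first and the two-column gene/position SNP table second, e.g.

    python rsat_filter_snps.py rsat_sites.tab snps.tab --strand both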
CODONS = {'AUG': "Methionine", 'UUU': "Phenylalanine",
          'UUC': "Phenylalanine", 'UUA': "Leucine", 'UUG': "Leucine",
          'UCU': "Serine", 'UCC': "Serine", 'UCA': "Serine",
          'UCG': "Serine", 'UAU': "Tyrosine", 'UAC': "Tyrosine",
          'UGU': "Cysteine", 'UGC': "Cysteine", 'UGG': "Tryptophan",
          'UAA': "STOP", 'UAG': "STOP", 'UGA': "STOP"}


def of_codon(codon):
    if codon not in CODONS:
        raise ValueError('Invalid codon: {}'.format(codon))
    return CODONS[codon]


def proteins(strand):
    proteins = []
    for codon in map(of_codon, _chunkstring(strand, 3)):
        if codon == 'STOP':
            break
        proteins.append(codon)
    return proteins


def _chunkstring(string, n):
    return (string[i:n + i] for i in range(0, len(string), n))
behrtam/xpython
exercises/protein-translation/example.py
Python
mit
801
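Worked examples, derived directly from the CODONS table above:

assert of_codon('AUG') == 'Methionine'
assert proteins('AUGUUUUCU') == ['Methionine', 'Phenylalanine', 'Serine']
assert proteins('AUGUAAUCU') == ['Methionine']  # translation stops at the UAA codon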
# FILE:     autoload/conque_term/conque_globals.py
# AUTHOR:   Nico Raffo <[email protected]>
# WEBSITE:  http://conque.googlecode.com
# MODIFIED: __MODIFIED__
# VERSION:  __VERSION__, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-__YEAR__ Nico Raffo
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""Common global constants and functions for Conque."""

import sys
import re
import os

# DEBUG
import logging
# DEBUG
import traceback
# DEBUG

# PYTHON VERSION
CONQUE_PYTHON_VERSION = sys.version_info[0]

# Encoding
try:
    # Vim's character encoding
    import vim
    CONQUE_VIM_ENCODING = vim.eval('&encoding')
except:
    CONQUE_VIM_ENCODING = 'utf-8'


def u(str_val, str_encoding='utf-8', errors='strict'):
    """ Foolhardy attempt to make unicode string syntax compatible with both python 2 and 3. """
    if not str_val:
        str_val = ''
    if CONQUE_PYTHON_VERSION == 3:
        return str_val
    else:
        return unicode(str_val, str_encoding, errors)


def uchr(str):
    """ Foolhardy attempt to make unicode string syntax compatible with both python 2 and 3. """
    if CONQUE_PYTHON_VERSION == 3:
        return chr(str)
    else:
        return unichr(str)


# Logging
# enable logging
# DEBUG
CONQUE_LOG_FILENAME = None # DEBUG
if os.path.exists('/home/nraffo/.vim/'): # DEBUG
    CONQUE_LOG_FILENAME = '/home/nraffo/.vim/pylog.log' # DEBUG
elif os.path.exists('C:/Documents and Settings/nraffo/vimfiles/'): # DEBUG
    CONQUE_LOG_FILENAME = 'C:/Documents and Settings/nraffo/vimfiles/pylog.log' # DEBUG
elif os.path.exists('/Users/nraffo/vimfiles/'): # DEBUG
    CONQUE_LOG_FILENAME = '/Users/nraffo/vimfiles/pylog.log' # DEBUG

CONQUE_LOG_LEVEL = logging.INFO #DEBUG
if CONQUE_LOG_FILENAME: # DEBUG
    logging.basicConfig(filename=CONQUE_LOG_FILENAME, level=CONQUE_LOG_LEVEL) # DEBUG

# Unix escape sequence settings
CONQUE_CTL = {
     1: 'soh', # start of heading
     2: 'stx', # start of text
     7: 'bel', # bell
     8: 'bs',  # backspace
     9: 'tab', # tab
    10: 'nl',  # new line
    13: 'cr',  # carriage return
    14: 'so',  # shift out
    15: 'si'   # shift in
}
#  11 : 'vt',  # vertical tab
#  12 : 'ff',  # form feed

# Escape sequences
CONQUE_ESCAPE = {
    'm': 'font',
    'J': 'clear_screen',
    'K': 'clear_line',
    '@': 'add_spaces',
    'A': 'cursor_up',
    'B': 'cursor_down',
    'C': 'cursor_right',
    'D': 'cursor_left',
    'G': 'cursor_to_column',
    'H': 'cursor',
    'P': 'delete_chars',
    'f': 'cursor',
    'g': 'tab_clear',
    'r': 'set_coords',
    'h': 'set',
    'l': 'reset'
}
#    'L': 'insert_lines',
#    'M': 'delete_lines',
#    'd': 'cusor_vpos',

# Alternate escape sequences, no [
CONQUE_ESCAPE_PLAIN = {
    'D': 'scroll_up',
    'E': 'next_line',
    'H': 'set_tab',
    'M': 'scroll_down'
}
#    'N': 'single_shift_2',
#    'O': 'single_shift_3',
#    '=': 'alternate_keypad',
#    '>': 'numeric_keypad',
#    '7': 'save_cursor',
#    '8': 'restore_cursor',

# Character set escape sequences, with "("
CONQUE_ESCAPE_CHARSET = {
    'A': 'uk',
    'B': 'us',
    '0': 'graphics'
}

# Uber alternate escape sequences, with # or ?
CONQUE_ESCAPE_QUESTION = {
    '1h': 'new_line_mode',
    '3h': '132_cols',
    '4h': 'smooth_scrolling',
    '5h': 'reverse_video',
    '6h': 'relative_origin',
    '7h': 'set_auto_wrap',
    '8h': 'set_auto_repeat',
    '9h': 'set_interlacing_mode',
    '1l': 'set_cursor_key',
    '2l': 'set_vt52',
    '3l': '80_cols',
    '4l': 'set_jump_scrolling',
    '5l': 'normal_video',
    '6l': 'absolute_origin',
    '7l': 'reset_auto_wrap',
    '8l': 'reset_auto_repeat',
    '9l': 'reset_interlacing_mode'
}

CONQUE_ESCAPE_HASH = {
    '8': 'screen_alignment_test'
}
#    '3': 'double_height_top',
#    '4': 'double_height_bottom',
#    '5': 'single_height_single_width',
#    '6': 'single_height_double_width',

CONQUE_GRAPHICS_SET = [
    0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
    0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
    0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
    0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
    0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
    0x0028, 0x0029, 0x002A, 0x2192, 0x2190, 0x2191, 0x2193, 0x002F,
    0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
    0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
    0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
    0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
    0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
    0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x00A0,
    0x25C6, 0x2592, 0x2409, 0x240C, 0x240D, 0x240A, 0x00B0, 0x00B1,
    0x2591, 0x240B, 0x2518, 0x2510, 0x250C, 0x2514, 0x253C, 0xF800,
    0xF801, 0x2500, 0xF803, 0xF804, 0x251C, 0x2524, 0x2534, 0x252C,
    0x2502, 0x2264, 0x2265, 0x03C0, 0x2260, 0x00A3, 0x00B7, 0x007F,
    0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
    0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
    0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
    0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
    0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
    0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
    0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
    0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
    0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,
    0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
    0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
    0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,
    0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
    0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
    0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
    0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF
]

# Font codes
CONQUE_FONT = {
    0: {'description': 'Normal (default)', 'attributes': {'cterm': 'NONE', 'ctermfg': 'NONE', 'ctermbg': 'NONE', 'gui': 'NONE', 'guifg': 'NONE', 'guibg': 'NONE'}, 'normal': True},
    1: {'description': 'Bold', 'attributes': {'cterm': 'BOLD', 'gui': 'BOLD'}, 'normal': False},
    4: {'description': 'Underlined', 'attributes': {'cterm': 'UNDERLINE', 'gui': 'UNDERLINE'}, 'normal': False},
    5: {'description': 'Blink (appears as Bold)', 'attributes': {'cterm': 'BOLD', 'gui': 'BOLD'}, 'normal': False},
    7: {'description': 'Inverse', 'attributes': {'cterm': 'REVERSE', 'gui': 'REVERSE'}, 'normal': False},
    8: {'description': 'Invisible (hidden)', 'attributes': {'ctermfg': '0', 'ctermbg': '0', 'guifg': '#000000', 'guibg': '#000000'}, 'normal': False},
    22: {'description': 'Normal (neither bold nor faint)', 'attributes': {'cterm': 'NONE', 'gui': 'NONE'}, 'normal': True},
    24: {'description': 'Not underlined', 'attributes': {'cterm': 'NONE', 'gui': 'NONE'}, 'normal': True},
    25: {'description': 'Steady (not blinking)', 'attributes': {'cterm': 'NONE', 'gui': 'NONE'}, 'normal': True},
    27: {'description': 'Positive (not inverse)', 'attributes': {'cterm': 'NONE', 'gui': 'NONE'}, 'normal': True},
    28: {'description': 'Visible (not hidden)', 'attributes': {'ctermfg': 'NONE', 'ctermbg': 'NONE', 'guifg': 'NONE', 'guibg': 'NONE'}, 'normal': True},
    30: {'description': 'Set foreground color to Black', 'attributes': {'ctermfg': '16', 'guifg': '#000000'}, 'normal': False},
    31: {'description': 'Set foreground color to Red', 'attributes': {'ctermfg': '1', 'guifg': '#ff0000'}, 'normal': False},
    32: {'description': 'Set foreground color to Green', 'attributes': {'ctermfg': '2', 'guifg': '#00ff00'}, 'normal': False},
    33: {'description': 'Set foreground color to Yellow', 'attributes': {'ctermfg': '3', 'guifg': '#ffff00'}, 'normal': False},
    34: {'description': 'Set foreground color to Blue', 'attributes': {'ctermfg': '4', 'guifg': '#0000ff'}, 'normal': False},
    35: {'description': 'Set foreground color to Magenta', 'attributes': {'ctermfg': '5', 'guifg': '#990099'}, 'normal': False},
    36: {'description': 'Set foreground color to Cyan', 'attributes': {'ctermfg': '6', 'guifg': '#009999'}, 'normal': False},
    37: {'description': 'Set foreground color to White', 'attributes': {'ctermfg': '7', 'guifg': '#ffffff'}, 'normal': False},
    39: {'description': 'Set foreground color to default (original)', 'attributes': {'ctermfg': 'NONE', 'guifg': 'NONE'}, 'normal': True},
    40: {'description': 'Set background color to Black', 'attributes': {'ctermbg': '16', 'guibg': '#000000'}, 'normal': False},
    41: {'description': 'Set background color to Red', 'attributes': {'ctermbg': '1', 'guibg': '#ff0000'}, 'normal': False},
    42: {'description': 'Set background color to Green', 'attributes': {'ctermbg': '2', 'guibg': '#00ff00'}, 'normal': False},
    43: {'description': 'Set background color to Yellow', 'attributes': {'ctermbg': '3', 'guibg': '#ffff00'}, 'normal': False},
    44: {'description': 'Set background color to Blue', 'attributes': {'ctermbg': '4', 'guibg': '#0000ff'}, 'normal': False},
    45: {'description': 'Set background color to Magenta', 'attributes': {'ctermbg': '5', 'guibg': '#990099'}, 'normal': False},
    46: {'description': 'Set background color to Cyan', 'attributes': {'ctermbg': '6', 'guibg': '#009999'}, 'normal': False},
    47: {'description': 'Set background color to White', 'attributes': {'ctermbg': '7', 'guibg': '#ffffff'}, 'normal': False},
    49: {'description': 'Set background color to default (original).', 'attributes': {'ctermbg': 'NONE', 'guibg': 'NONE'}, 'normal': True},
    90: {'description': 'Set foreground color to Black', 'attributes': {'ctermfg': '8', 'guifg': '#000000'}, 'normal': False},
    91: {'description': 'Set foreground color to Red', 'attributes': {'ctermfg': '9', 'guifg': '#ff0000'}, 'normal': False},
    92: {'description': 'Set foreground color to Green', 'attributes': {'ctermfg': '10', 'guifg': '#00ff00'}, 'normal': False},
    93: {'description': 'Set foreground color to Yellow', 'attributes': {'ctermfg': '11', 'guifg': '#ffff00'}, 'normal': False},
    94: {'description': 'Set foreground color to Blue', 'attributes': {'ctermfg': '12', 'guifg': '#0000ff'}, 'normal': False},
    95: {'description': 'Set foreground color to Magenta', 'attributes': {'ctermfg': '13', 'guifg': '#990099'}, 'normal': False},
    96: {'description': 'Set foreground color to Cyan', 'attributes': {'ctermfg': '14', 'guifg': '#009999'}, 'normal': False},
    97: {'description': 'Set foreground color to White', 'attributes': {'ctermfg': '15', 'guifg': '#ffffff'}, 'normal': False},
    100: {'description': 'Set background color to Black', 'attributes': {'ctermbg': '8', 'guibg': '#000000'}, 'normal': False},
    101: {'description': 'Set background color to Red', 'attributes': {'ctermbg': '9', 'guibg': '#ff0000'}, 'normal': False},
    102: {'description': 'Set background color to Green', 'attributes': {'ctermbg': '10', 'guibg': '#00ff00'}, 'normal': False},
    103: {'description': 'Set background color to Yellow', 'attributes': {'ctermbg': '11', 'guibg': '#ffff00'}, 'normal': False},
    104: {'description': 'Set background color to Blue', 'attributes': {'ctermbg': '12', 'guibg': '#0000ff'}, 'normal': False},
    105: {'description': 'Set background color to Magenta', 'attributes': {'ctermbg': '13', 'guibg': '#990099'}, 'normal': False},
    106: {'description': 'Set background color to Cyan', 'attributes': {'ctermbg': '14', 'guibg': '#009999'}, 'normal': False},
    107: {'description': 'Set background color to White', 'attributes': {'ctermbg': '15', 'guibg': '#ffffff'}, 'normal': False}
}

# regular expression matching (almost) all control sequences
CONQUE_SEQ_REGEX = re.compile("(\x1b\[?\??#?[0-9;]*[a-zA-Z0-9@=>]|\x1b\][0-9];.*?\x07|[\x01-\x0f]|\x1b\([AB0])")
CONQUE_SEQ_REGEX_CTL = re.compile("^[\x01-\x0f]$")
CONQUE_SEQ_REGEX_CSI = re.compile("^\x1b\[")
CONQUE_SEQ_REGEX_TITLE = re.compile("^\x1b\]")
CONQUE_SEQ_REGEX_HASH = re.compile("^\x1b#")
CONQUE_SEQ_REGEX_ESC = re.compile("^\x1b.$")
CONQUE_SEQ_REGEX_CHAR = re.compile("^\x1b[()]")

# match table output
CONQUE_TABLE_OUTPUT = re.compile("^\s*\|\s.*\s\|\s*$|^\s*\+[=+-]+\+\s*$")

# basic terminal colors
CONQUE_COLOR_SEQUENCE = (
    '000', '009', '090', '099', '900', '909', '990', '999',
    '000', '00f', '0f0', '0ff', 'f00', 'f0f', 'ff0', 'fff'
)


# Windows subprocess constants

# shared memory size
CONQUE_SOLE_BUFFER_LENGTH = 1000
CONQUE_SOLE_INPUT_SIZE = 1000
CONQUE_SOLE_STATS_SIZE = 1000
CONQUE_SOLE_COMMANDS_SIZE = 255
CONQUE_SOLE_RESCROLL_SIZE = 255
CONQUE_SOLE_RESIZE_SIZE = 255

# interval of screen redraw
# larger number means less frequent
CONQUE_SOLE_SCREEN_REDRAW = 50

# interval of full buffer redraw
# larger number means less frequent
CONQUE_SOLE_BUFFER_REDRAW = 500

# interval of full output bucket replacement
# larger number means less frequent, 1 = every time
CONQUE_SOLE_MEM_REDRAW = 1000

# maximum number of lines with terminal colors
# ignored if g:ConqueTerm_Color = 2
CONQUE_MAX_SYNTAX_LINES = 200

# windows input splitting on special keys
CONQUE_WIN32_REGEX_VK = re.compile("(\x1b\[[0-9;]+VK)")

# windows attribute string splitting
CONQUE_WIN32_REGEX_ATTR = re.compile("((.)\\2*)", re.DOTALL)

# special key attributes
CONQUE_VK_ATTR_CTRL_PRESSED = u('1024')
jcordry/dotfiles
vim/bundle/conque/autoload/conque_term/conque_globals.py
Python
mit
14,560
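A small sketch (not part of the plugin) of how the tokenizing regex above is meant to be used: because the whole pattern is one capture group, re.split() keeps the control sequences alongside the plain text.

chunk = '\x1b[1mhello\x1b[0m\x07world'
tokens = CONQUE_SEQ_REGEX.split(chunk)
# -> ['', '\x1b[1m', 'hello', '\x1b[0m', '', '\x07', 'world']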
from django.conf.urls import patterns, include, url
from rest_framework.routers import DefaultRouter
from django.contrib import admin

from tweeter import views

admin.autodiscover()

router = DefaultRouter()
router.register(r'tweets', views.TweetViewSet)
router.register(r'users', views.UserViewSet)

urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
    url(r'^$', views.index, name='index'),
)
nnja/tweeter
angulardjango/urls.py
Python
mit
625
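For reference, a sketch of the URL surface the DefaultRouter above generates (standard DRF list/detail routes; the lookup field is assumed to be the default pk):

# /api/              -> API root (provided by DefaultRouter)
# /api/tweets/       -> tweet list   (GET, POST)
# /api/tweets/<pk>/  -> tweet detail (GET, PUT, PATCH, DELETE)
# /api/users/        -> user list
# /api/users/<pk>/   -> user detail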
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006  Donald N. Allingham
# Copyright (C) 2009-2011  Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------

#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk

#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ...widgets import SimpleButton
from .grampstab import GrampsTab
from gramps.gen.errors import WindowActiveError
from ...utils import match_primary_mask

_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
_RETURN = Gdk.keyval_from_name("Return")
_DEL = Gdk.keyval_from_name("Delete")
_ADD = Gdk.keyval_from_name("Insert")
_OPEN = Gdk.keyval_from_name("o")
_LEFT = Gdk.keyval_from_name("Left")
_RIGHT = Gdk.keyval_from_name("Right")

#-------------------------------------------------------------------------
#
# Classes
#
#-------------------------------------------------------------------------
class ButtonTab(GrampsTab):
    """
    This class derives from the base GrampsTab, yet is not a usable Tab. It
    serves as another base tab for classes which need an Add/Edit/Remove
    button combination.
    """

    _MSG = {
        'add'   : _('Add'),
        'del'   : _('Remove'),
        'edit'  : _('Edit'),
        'share' : _('Share'),
        'jump'  : _('Jump To'),
        'up'    : _('Move Up'),
        'down'  : _('Move Down'),
        'left'  : _('Move Left'),
        'right' : _('Move Right')
        }
    L_R = 2  # indicator for left/right move buttons

    def __init__(self, dbstate, uistate, track, name, share_button=False,
                 move_buttons=False, jump_button=False, top_label=None):
        """
        Similar to the base class, except that the buttons are created
        after the tab is built.

        @param dbstate: The database state. Contains a reference to
        the database, along with other state information. The GrampsTab
        uses this to access the database and to pass to any created
        child windows (such as edit dialogs).
        @type dbstate: DbState
        @param uistate: The UI state. Used primarily to pass to any created
        subwindows.
        @type uistate: DisplayState
        @param track: The window tracking mechanism used to manage windows.
        This is only used to pass to generated child windows.
        @type track: list
        @param name: Notebook label name
        @type name: str/unicode
        @param share_button: Add a share button to the Notebook tab or not
        @type share_button: bool
        @param move_buttons: Add up and down button to the Notebook tab or not
        @type move_buttons: bool
        @param jump_button: Add a goto button
        @type jump_button: bool
        @param top_label: Add a label in front of the buttons if given
        @type top_label: string or None for no label
        """
        self.dirty_selection = False
        GrampsTab.__init__(self, dbstate, uistate, track, name)
        self._create_buttons(share_button, move_buttons, jump_button,
                             top_label)

    def _create_buttons(self, share_button, move_buttons, jump_button,
                        top_label):
        """
        Create a button box consisting of three buttons, one for Add,
        one for Edit, and one for Delete.

        Add buttons for Share, Move and Jump depending on parameters. This
        button box is then appended to hbox (self). A label is prepended if
        top_label is given.

        Note: some ButtonTab subclasses override this method.
        """
        if top_label:
            self.top_label = Gtk.Label(label=top_label)
            self.top_label.set_use_markup(True)
            self.track_ref_for_deletion("top_label")

        self.add_btn = SimpleButton('list-add', self.add_button_clicked)
        self.edit_btn = SimpleButton('gtk-edit', self.edit_button_clicked)
        self.del_btn = SimpleButton('list-remove', self.del_button_clicked)
        self.track_ref_for_deletion("add_btn")
        self.track_ref_for_deletion("edit_btn")
        self.track_ref_for_deletion("del_btn")

        self.add_btn.set_tooltip_text(self._MSG['add'])
        self.edit_btn.set_tooltip_text(self._MSG['edit'])
        self.del_btn.set_tooltip_text(self._MSG['del'])

        if share_button:
            self.share_btn = SimpleButton('gtk-index',
                                          self.share_button_clicked)
            self.share_btn.set_tooltip_text(self._MSG['share'])
            self.track_ref_for_deletion("share_btn")
        else:
            self.share_btn = None

        if move_buttons:
            l_r = move_buttons == self.L_R
            self.up_btn = SimpleButton('go-previous' if l_r else 'go-up',
                                       self.up_button_clicked)
            self.up_btn.set_tooltip_text(self._MSG['left' if l_r else 'up'])
            self.down_btn = SimpleButton('go-next' if l_r else 'go-down',
                                         self.down_button_clicked)
            self.down_btn.set_tooltip_text(
                self._MSG['right' if l_r else 'down'])
            self.track_ref_for_deletion("up_btn")
            self.track_ref_for_deletion("down_btn")
        else:
            self.up_btn = None
            self.down_btn = None

        if jump_button:
            self.jump_btn = SimpleButton('go-jump', self.jump_button_clicked)
            self.track_ref_for_deletion("jump_btn")
            self.jump_btn.set_tooltip_text(self._MSG['jump'])
        else:
            self.jump_btn = None

        hbox = Gtk.Box()
        hbox.set_spacing(6)
        if top_label:
            hbox.pack_start(self.top_label, False, True, 0)
        hbox.pack_start(self.add_btn, False, True, 0)
        if share_button:
            hbox.pack_start(self.share_btn, False, True, 0)
        hbox.pack_start(self.edit_btn, False, True, 0)
        hbox.pack_start(self.del_btn, False, True, 0)
        if move_buttons:
            hbox.pack_start(self.up_btn, False, True, 0)
            hbox.pack_start(self.down_btn, False, True, 0)

        if self.jump_btn:
            hbox.pack_start(self.jump_btn, False, True, 0)
        hbox.show_all()
        self.pack_start(hbox, False, True, 0)

        if self.dbstate.db.readonly:
            self.add_btn.set_sensitive(False)
            self.del_btn.set_sensitive(False)
            if share_button:
                self.share_btn.set_sensitive(False)
            if jump_button and self.jump_btn:
                self.jump_btn.set_sensitive(False)
            if move_buttons:
                self.up_btn.set_sensitive(False)
                self.down_btn.set_sensitive(False)

    def double_click(self, obj, event):
        """
        Handles the double click on the list. If a double click occurs,
        the Edit button handler is called.
        """
        if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and
                event.button == 1):
            try:
                self.edit_button_clicked(obj)
            except WindowActiveError:
                pass

    def key_pressed(self, obj, event):
        """
        Handles the return key being pressed on the list. If the key is
        pressed, the Edit button handler is called.
        """
        if event.type == Gdk.EventType.KEY_PRESS:
            #print 'key pressed', event.keyval, event.get_state(), _ADD
            if event.keyval in (_RETURN, _KP_ENTER):
                try:
                    self.edit_button_clicked(obj)
                except WindowActiveError:
                    pass
            elif event.keyval in (_DEL,) and self.del_btn:
                if self.dirty_selection or self.dbstate.db.readonly:
                    return
                self.del_button_clicked(obj)
            elif event.keyval in (_ADD,) and self.add_btn:
                if self.dirty_selection or self.dbstate.db.readonly:
                    return
                self.add_button_clicked(obj)
            elif event.keyval in (_OPEN,) and self.share_btn and \
                    match_primary_mask(event.get_state()):
                self.share_button_clicked(obj)
            elif event.keyval in (_LEFT,) and \
                    (event.get_state() & Gdk.ModifierType.MOD1_MASK):
                self.prev_page()
            elif event.keyval in (_RIGHT,) and \
                    (event.get_state() & Gdk.ModifierType.MOD1_MASK):
                self.next_page()
            else:
                return
            return True

    def add_button_clicked(self, obj):
        """
        Function called when the Add button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Add clicked")

    def share_button_clicked(self, obj):
        """
        Function called when the Share button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Share clicked")

    def jump_button_clicked(self, obj):
        """
        Function called when the Jump button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Jump clicked")

    def del_button_clicked(self, obj):
        """
        Function called when the Delete button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Delete clicked")

    def edit_button_clicked(self, obj):
        """
        Function called when the Edit button is clicked or a double
        click is caught.
        This function should be overridden by the derived class.
        """
        print("Uncaught Edit clicked")

    def up_button_clicked(self, obj):
        """
        Function called when the Up button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Up clicked")

    def down_button_clicked(self, obj):
        """
        Function called when the Down button is clicked.
        This function should be overridden by the derived class.
        """
        print("Uncaught Down clicked")

    def _selection_changed(self, obj=None):
        """
        Attached to the selection's 'changed' signal. Checks to see if
        anything is selected. If it is, the Edit and Delete buttons are
        enabled, otherwise they are disabled.
        """
        # Comparing to None is important, as empty strings
        # and 0 can be returned
        # This method is called as callback on change, and can be called
        # explicitly; dirty_selection must make sure they do not interact
        if self.dirty_selection:
            return
        if self.get_selected() is not None:
            self.edit_btn.set_sensitive(True)
            if self.jump_btn:
                self.jump_btn.set_sensitive(True)
            if not self.dbstate.db.readonly:
                self.del_btn.set_sensitive(True)
            # note: up and down cannot be set unsensitive after clicked
            #       or they do not respond to a next click
            #if self.up_btn :
            #    self.up_btn.set_sensitive(True)
            #    self.down_btn.set_sensitive(True)
        else:
            self.edit_btn.set_sensitive(False)
            if self.jump_btn:
                self.jump_btn.set_sensitive(False)
            if not self.dbstate.db.readonly:
                self.del_btn.set_sensitive(False)
            # note: up and down cannot be set unsensitive after clicked
            #       or they do not respond to a next click
            #if self.up_btn :
            #    self.up_btn.set_sensitive(False)
            #    self.down_btn.set_sensitive(False)
SNoiraud/gramps
gramps/gui/editors/displaytabs/buttontab.py
Python
gpl-2.0
12,717
#! /usr/bin/python3
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2019 Google LLC
#

"""
Script to remove boards

Usage:
   rmboard.py <board_name>...

A single commit is created for each board removed.

Some boards may depend on files provided by another and this will cause
problems, generally the removal of files which should not be removed.

This script works by:
    - Looking through the MAINTAINERS files which mention a board to find out
      what files the board uses
    - Looking through the Kconfig files which mention a board to find one that
      needs to have material removed

Search for ## to update the commit message manually.
"""

import glob
import os
import re
import sys

from patman import command

def rm_kconfig_include(path):
    """Remove a path from Kconfig files

    This function finds the given path in a 'source' statement in a Kconfig
    file and removes that line from the file. This is needed because the path
    is going to be removed, so any reference to it will cause a problem with
    Kconfig parsing.

    The changes are made locally and then added to the git staging area.

    Args:
        path: Path to search for and remove
    """
    cmd = ['git', 'grep', path]
    stdout = command.RunPipe([cmd], capture=True, raise_on_error=False).stdout
    if not stdout:
        return
    fname = stdout.split(':')[0]

    print("Fixing up '%s' to remove reference to '%s'" % (fname, path))
    cmd = ['sed', '-i', '\|%s|d' % path, fname]
    stdout = command.RunPipe([cmd], capture=True).stdout

    cmd = ['git', 'add', fname]
    stdout = command.RunPipe([cmd], capture=True).stdout

def rm_board(board):
    """Create a commit which removes a single board

    This looks up the MAINTAINERS file to find files that need to be removed,
    then removes pieces from the Kconfig files that mention the board.

    Args:
        board: Board name to remove
    """

    # Find all MAINTAINERS and Kconfig files which mention the board
    cmd = ['git', 'grep', '-l', board]
    stdout = command.RunPipe([cmd], capture=True).stdout
    maintain = []
    kconfig = []
    for line in stdout.splitlines():
        line = line.strip()
        if 'MAINTAINERS' in line:
            if line not in maintain:
                maintain.append(line)
        elif 'Kconfig' in line:
            kconfig.append(line)
    paths = []
    cc = []

    # Look through the MAINTAINERS file to find things to remove
    for fname in maintain:
        with open(fname) as fd:
            for line in fd:
                line = line.strip()
                fields = re.split('[ \t]', line, 1)
                if len(fields) == 2:
                    if fields[0] == 'M:':
                        cc.append(fields[1])
                    elif fields[0] == 'F:':
                        paths.append(fields[1].strip())

    # Expand any wildcards in the MAINTAINERS file
    real = []
    for path in paths:
        if path[-1] == '/':
            path = path[:-1]
        if '*' in path:
            globbed = glob.glob(path)
            print("Expanded '%s' to '%s'" % (path, globbed))
            real += globbed
        else:
            real.append(path)

    # Search for Kconfig files in the resulting list. Remove any 'source'
    # lines which reference Kconfig files we want to remove
    for path in real:
        cmd = ['find', path]
        stdout = (command.RunPipe([cmd], capture=True, raise_on_error=False).
                  stdout)
        for fname in stdout.splitlines():
            if fname.endswith('Kconfig'):
                rm_kconfig_include(fname)

    # Remove unwanted files
    cmd = ['git', 'rm', '-r'] + real
    stdout = command.RunPipe([cmd], capture=True).stdout

    ## Change the messages as needed
    msg = '''arm: Remove %s board

This board has not been converted to CONFIG_DM_MMC by the deadline.
Remove it.

''' % board
    for name in cc:
        msg += 'Patch-cc: %s\n' % name

    # Create the commit
    cmd = ['git', 'commit', '-s', '-m', msg]
    stdout = command.RunPipe([cmd], capture=True).stdout

    # Check if the board is mentioned anywhere else. The user will need to
    # deal with this
    cmd = ['git', 'grep', '-il', board]
    print(command.RunPipe([cmd], capture=True, raise_on_error=False).stdout)
    print(' '.join(cmd))

for board in sys.argv[1:]:
    rm_board(board)
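Because the script commits removals immediately, it can help to preview what a board touches first. A hedged sketch follows, reusing only the command.RunPipe interface already used above; the preview_board helper and the 'sandbox' board name are illustrative, not part of the script:

# Illustrative pre-check: list every file mentioning a board, without
# removing or committing anything, so the removal can be vetted first.
from patman import command

def preview_board(board):
    """Print each file that mentions 'board' (case-insensitive)."""
    cmd = ['git', 'grep', '-il', board]
    stdout = command.RunPipe([cmd], capture=True, raise_on_error=False).stdout
    for fname in stdout.splitlines():
        print(fname)

# Example (board name is hypothetical):
#     preview_board('sandbox')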
Stane1983/u-boot
tools/rmboard.py
Python
gpl-2.0
4,368
#!/usr/bin/env python # txt2tags - generic text conversion tool # http://txt2tags.sf.net # # Copyright 2001, 2002, 2003, 2004 Aurelio Marinho Jargas # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You have received a copy of the GNU General Public License along # with this program, on the COPYING file. # # # # +-------------------------------------------------------------+ # | IMPORTANT MESSAGES, PLEASE READ | # +-------------------------------------------------------------+ # | | # | | # | v1.x COMPATIBILITY | # | ------------------ | # | | # | Due the major syntax changes, the new 2.x series | # | BREAKS backwards compatibility. | # | | # | Use the 't2tconv' script to upgrade your existing | # | v1.x files to conform the new v2.x syntax. | # | | # | Do a visual inspection on the new converted file. | # | Specially Pre & Post proc filters can break. | # | Check them! | # | | # | | # +-------------------------------------------------------------+ # # ######################################################################## # # BORING CODE EXPLANATION AHEAD # # Just read if you wish to understand how the txt2tags code works # ######################################################################## # # Version 2.0 was a complete rewrite for the program 'core'. # # Now the code that [1] parses the marked text is separated from the # code that [2] insert the target tags. # # [1] made by: def convert() # [2] made by: class BlockMaster # # The structures of the marked text are identifyed and its contents are # extracted into a data holder (Python lists and dictionaries). # # When parsing the source file, the blocks (para, lists, quote, table) # are opened with BlockMaster, right when found. Then its contents, # which spans on several lines, are feeded into a special holder on the # BlockMaster instance. Just when the block is closed, the target tags # are inserted for the full block as a whole, in one pass. This way, we # have a better control on blocks. Much better than the previous line by # line approach. # # In other words, whenever inside a block, the parser *holds* the tag # insertion process, waiting until the full block is readed. That was # needed primary to close paragraphs for the new XHTML target, but # proved to be a very good adding, improving many other processings. # # ------------------------------------------------------------------- # # There is also a brand new code for the Configuration schema, 100% # rewritten. There are new classes, all self documented: CommandLine, # SourceDocument, ConfigMaster and ConfigLines. In short, a new RAW # Config format was created, and all kind of configuration is first # converted to this format, and then a generic method parses it. # # The init processing was changed also, and now the functions which # gets informations about the input files are: get_infiles_config(), # process_source_file() and convert_this_files() # # Other parts are untouched, and remains the same as in v1.7, as the # marks regexes, target Headers and target Tags&Rules. 
# ######################################################################## # Now I think the code is nice, easier to read and understand #XXX Python coding warning # Avoid common mistakes: # - do NOT use newlist=list instead newlist=list[:] # - do NOT use newdic=dic instead newdic=dic.copy() # - do NOT use dic[key] instead dic.get(key) # - do NOT use del dic[key] without has_key() before #XXX Smart Image Align don't work if the image is a link # Can't fix that because the image is expanded together with the # link, at the linkbank filling moment. Only the image is passed # to parse_images(), not the full line, so it is always 'middle'. #XXX Paragraph separation not valid inside Quote # Quote will not have <p></p> inside, instead will close and open # again the <blockquote>. This really sux in CSS, when defining a # diferent background color. Still don't know how to fix it. #XXX TODO (maybe) # New mark or macro which expands to an anchor full title. # It is necessary to parse the full document in this order: # DONE 1st scan: HEAD: get all settings, including %!includeconf # DONE 2nd scan: BODY: expand includes & apply %!preproc # 3rd scan: BODY: read titles and compose TOC info # 4th scan: BODY: full parsing, expanding [#anchor] 1st # Steps 2 and 3 can be made together, with no tag adding. # Two complete body scans will be *slow*, don't know if it worths. ############################################################################## # User config (1=ON, 0=OFF) USE_I18N = 1 # use gettext for i18ned messages? (default is 1) COLOR_DEBUG = 1 # show debug messages in colors? (default is 1) HTML_LOWER = 0 # use lowercased HTML tags instead upper? (default is 0) ############################################################################## # these are all the core Python modules used by txt2tags (KISS!) import re, string, os, sys, time, getopt # program information my_url = 'http://txt2tags.sf.net' my_name = 'txt2tags' my_email = '[email protected]' my_version = '2.1' # i18n - just use if available if USE_I18N: try: import gettext # if your locale dir is different, change it here cat = gettext.Catalog('txt2tags',localedir='/usr/share/locale/') _ = cat.gettext except: _ = lambda x:x else: _ = lambda x:x # FLAGS : the conversion related flags , may be used in %!options # OPTIONS : the conversion related options, may be used in %!options # ACTIONS : the other behaviour modifiers, valid on command line only # MACROS : the valid macros with their default values for formatting # SETTINGS: global miscelaneous settings, valid on RC file only # CONFIG_KEYWORDS: the valid %!key:val keywords # # FLAGS and OPTIONS are configs that affect the converted document. # They usually have also a --no-<option> to turn them OFF. # ACTIONS are needed because when doing multiple input files, strange # behaviour would be found, as use command line interface for the # first file and gui for the second. There is no --no-<action>. 
# --version and --help inside %!options are also odd # TARGETS = ['html', 'xhtml', 'sgml', 'tex', 'man', 'mgp', 'moin', 'pm6', 'txt'] FLAGS = {'headers' :1 , 'enum-title' :0 , 'mask-email' :0 , 'toc-only' :0 , 'toc' :0 , 'rc' :1 , 'css-sugar' :0 , 'css-suggar' :0 , 'quiet' :0 } OPTIONS = {'target' :'', 'toc-level' :3 , 'style' :'', 'infile' :'', 'outfile' :'', 'encoding' :'', 'split' :0 , 'lang' :''} ACTIONS = {'help' :0 , 'version' :0 , 'gui' :0 , 'verbose' :0 , 'debug' :0 , 'dump-config':0 } MACROS = {'date' : '%Y%m%d', 'infile': '%f', 'mtime': '%Y%m%d', 'outfile': '%f'} SETTINGS = {} # for future use CONFIG_KEYWORDS = [ 'target', 'encoding', 'style', 'options', 'preproc','postproc', 'guicolors'] TARGET_NAMES = { 'html' : _('HTML page'), 'xhtml': _('XHTML page'), 'sgml' : _('SGML document'), 'tex' : _('LaTeX document'), 'man' : _('UNIX Manual page'), 'mgp' : _('Magic Point presentation'), 'moin' : _('MoinMoin page'), 'pm6' : _('PageMaker 6.0 document'), 'txt' : _('Plain Text'), } DEBUG = 0 # do not edit here, please use --debug VERBOSE = 0 # do not edit here, please use -v, -vv or -vvv QUIET = 0 # do not edit here, please use --quiet GUI = 0 AUTOTOC = 1 RC_RAW = [] CMDLINE_RAW = [] CONF = {} BLOCK = None regex = {} TAGS = {} rules = {} lang = 'english' TARGET = '' STDIN = STDOUT = '-' ESCCHAR = '\x00' SEPARATOR = '\x01' LISTNAMES = {'-':'list', '+':'numlist', ':':'deflist'} LINEBREAK = {'default':'\n', 'win':'\r\n', 'mac':'\r'} RCFILE = {'default':'.txt2tagsrc', 'win':'_t2trc'} # plataform specific settings LB = LINEBREAK.get(sys.platform[:3]) or LINEBREAK['default'] RC = RCFILE.get(sys.platform[:3]) or RCFILE['default'] # identify a development version #dev_suffix = '-dev'+time.strftime('%m%d',time.localtime(time.time())) #my_version = my_version + dev_suffix VERSIONSTR = _("%s version %s <%s>")%(my_name,my_version,my_url) USAGE = string.join([ '', _("Usage: %s [OPTIONS] [infile.t2t ...]") % my_name, '', _(" -t, --target set target document type. currently supported:"), ' %s' % re.sub(r"[]'[]",'',repr(TARGETS)), _(" -i, --infile=FILE set FILE as the input file name ('-' for STDIN)"), _(" -o, --outfile=FILE set FILE as the output file name ('-' for STDOUT)"), _(" -n, --enum-title enumerate all title lines as 1, 1.1, 1.1.1, etc"), _(" -H, --no-headers suppress header, title and footer contents"), _(" --headers show header, title and footer contents (default ON)"), _(" --encoding set target file encoding (utf-8, iso-8859-1, etc)"), _(" --style=FILE use FILE as the document style (like HTML CSS)"), _(" --css-sugar insert CSS-friendly tags for HTML and XHTML targets"), _(" --mask-email hide email from spam robots. 
[email protected] turns <x (a) y z>"), _(" --toc add TOC (Table of Contents) to target document"), _(" --toc-only print document TOC and exit"), _(" --toc-level=N set maximum TOC level (depth) to N"), _(" --rc read user config file ~/.txt2tagsrc (default ON)"), _(" --gui invoke Graphical Tk Interface"), _(" -q, --quiet quiet mode, suppress all output (except errors)"), _(" -v, --verbose print informative messages during conversion"), _(" -h, --help print this help information and exit"), _(" -V, --version print program version and exit"), _(" --dump-config print all the config found and exit"), '', _("Turn OFF options:"), " --no-outfile, --no-infile, --no-style, --no-encoding, --no-headers", " --no-toc, --no-toc-only, --no-mask-email, --no-enum-title, --no-rc", " --no-css-sugar, --no-quiet", '', _("Example:\n %s -t html --toc myfile.t2t") % my_name, '', _("By default, converted output is saved to 'infile.<target>'."), _("Use --outfile to force an output file name."), _("If input file is '-', reads from STDIN."), _("If output file is '-', dumps output to STDOUT."), '' ], '\n') ############################################################################## # here is all the target's templates # you may edit them to fit your needs # - the %(HEADERn)s strings represent the Header lines # - the %(STYLE)s string is changed by --style contents # - the %(ENCODING)s string is changed by --encoding contents # - if any of the above is empty, the full line is removed # - use %% to represent a literal % # HEADER_TEMPLATE = { 'txt': """\ %(HEADER1)s %(HEADER2)s %(HEADER3)s """, 'sgml': """\ <!doctype linuxdoc system> <article> <title>%(HEADER1)s <author>%(HEADER2)s <date>%(HEADER3)s """, 'html': """\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="generator" CONTENT="http://txt2tags.sf.net"> <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s"> <LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s"> <TITLE>%(HEADER1)s</TITLE> </HEAD><BODY BGCOLOR="white" TEXT="black"> <P ALIGN="center"><CENTER><H1>%(HEADER1)s</H1> <FONT SIZE="4"> <I>%(HEADER2)s</I><BR> %(HEADER3)s </FONT></CENTER> """, 'htmlcss': """\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="generator" CONTENT="http://txt2tags.sf.net"> <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s"> <LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s"> <TITLE>%(HEADER1)s</TITLE> </HEAD> <BODY> <DIV CLASS="header" ID="header"> <H1>%(HEADER1)s</H1> <H2>%(HEADER2)s</H2> <H3>%(HEADER3)s</H3> </DIV> """, 'xhtml': """\ <?xml version="1.0"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>%(HEADER1)s</title> <meta name="generator" content="http://txt2tags.sf.net" /> <meta http-equiv="Content-Type" content="text/html; charset=%(ENCODING)s" /> <link rel="stylesheet" type="text/css" href="%(STYLE)s" /> </head> <body bgcolor="white" text="black"> <div align="center"> <h1>%(HEADER1)s</h1> <h2>%(HEADER2)s</h2> <h3>%(HEADER3)s</h3> </div> """, 'xhtmlcss': """\ <?xml version="1.0"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>%(HEADER1)s</title> <meta name="generator" content="http://txt2tags.sf.net" /> <meta http-equiv="Content-Type" content="text/html; charset=%(ENCODING)s" /> 
<link rel="stylesheet" type="text/css" href="%(STYLE)s" /> </head> <body> <div class="header" id="header"> <h1>%(HEADER1)s</h1> <h2>%(HEADER2)s</h2> <h3>%(HEADER3)s</h3> </div> """, 'man': """\ .TH "%(HEADER1)s" 1 "%(HEADER3)s" "%(HEADER2)s" """, # TODO style to <HR> 'pm6': """\ <PMTags1.0 win><C-COLORTABLE ("Preto" 1 0 0 0) ><@Normal= <FONT "Times New Roman"><CCOLOR "Preto"><SIZE 11> <HORIZONTAL 100><LETTERSPACE 0><CTRACK 127><CSSIZE 70><C+SIZE 58.3> <C-POSITION 33.3><C+POSITION 33.3><P><CBASELINE 0><CNOBREAK 0><CLEADING -0.05> <GGRID 0><GLEFT 7.2><GRIGHT 0><GFIRST 0><G+BEFORE 7.2><G+AFTER 0> <GALIGNMENT "justify"><GMETHOD "proportional"><G& "ENGLISH"> <GPAIRS 12><G%% 120><GKNEXT 0><GKWIDOW 0><GKORPHAN 0><GTABS $> <GHYPHENATION 2 34 0><GWORDSPACE 75 100 150><GSPACE -5 0 25> ><@Bullet=<@-PARENT "Normal"><FONT "Abadi MT Condensed Light"> <GLEFT 14.4><G+BEFORE 2.15><G%% 110><GTABS(25.2 l "")> ><@PreFormat=<@-PARENT "Normal"><FONT "Lucida Console"><SIZE 8><CTRACK 0> <GLEFT 0><G+BEFORE 0><GALIGNMENT "left"><GWORDSPACE 100 100 100><GSPACE 0 0 0> ><@Title1=<@-PARENT "Normal"><FONT "Arial"><SIZE 14><B> <GCONTENTS><GLEFT 0><G+BEFORE 0><GALIGNMENT "left"> ><@Title2=<@-PARENT "Title1"><SIZE 12><G+BEFORE 3.6> ><@Title3=<@-PARENT "Title1"><SIZE 10><GLEFT 7.2><G+BEFORE 7.2> ><@Title4=<@-PARENT "Title3"> ><@Title5=<@-PARENT "Title3"> ><@Quote=<@-PARENT "Normal"><SIZE 10><I>> %(HEADER1)s %(HEADER2)s %(HEADER3)s """, 'mgp': """\ #!/usr/X11R6/bin/mgp -t 90 %%deffont "normal" xfont "utopia-medium-r", charset "iso8859-1" %%deffont "normal-i" xfont "utopia-medium-i", charset "iso8859-1" %%deffont "normal-b" xfont "utopia-bold-r" , charset "iso8859-1" %%deffont "normal-bi" xfont "utopia-bold-i" , charset "iso8859-1" %%deffont "mono" xfont "courier-medium-r", charset "iso8859-1" %%default 1 size 5 %%default 2 size 8, fore "yellow", font "normal-b", center %%default 3 size 5, fore "white", font "normal", left, prefix " " %%tab 1 size 4, vgap 30, prefix " ", icon arc "red" 40, leftfill %%tab 2 prefix " ", icon arc "orange" 40, leftfill %%tab 3 prefix " ", icon arc "brown" 40, leftfill %%tab 4 prefix " ", icon arc "darkmagenta" 40, leftfill %%tab 5 prefix " ", icon arc "magenta" 40, leftfill %%%%------------------------- end of headers ----------------------------- %%page %%size 10, center, fore "yellow" %(HEADER1)s %%font "normal-i", size 6, fore "white", center %(HEADER2)s %%font "mono", size 7, center %(HEADER3)s """, # TODO please, improve me! 
'moin': """\ '''%(HEADER1)s''' ''%(HEADER2)s'' %(HEADER3)s """, 'tex': \ r"""\documentclass[11pt,a4paper]{article} \usepackage{amsfonts,graphicx,url} \usepackage[%(ENCODING)s]{inputenc} %% char encoding \usepackage{%(STYLE)s} %% user defined package \pagestyle{plain} %% do page numbering ('empty' turns off) \frenchspacing %% no aditional spaces after periods \setlength{\parskip}{8pt}\parindent=0pt %% no paragraph indentation %% uncomment next line for fancy PDF output on Adobe Acrobat Reader %%\usepackage[pdfstartview=FitV,colorlinks=true,bookmarks=true]{hyperref} \title{%(HEADER1)s} \author{%(HEADER2)s} \begin{document} \date{%(HEADER3)s} \maketitle \clearpage """ } ############################################################################## def getTags(config): "Returns all the known tags for the specified target" keys = [ 'paragraphOpen','paragraphClose', 'title1','title2','title3','title4','title5', 'numtitle1','numtitle2','numtitle3','numtitle4','numtitle5', 'blockVerbOpen','blockVerbClose', 'blockQuoteOpen','blockQuoteClose','blockQuoteLine', 'fontMonoOpen','fontMonoClose', 'fontBoldOpen','fontBoldClose', 'fontItalicOpen','fontItalicClose', 'fontUnderlineOpen','fontUnderlineClose', 'listOpen','listClose', 'listItemOpen','listItemClose','listItemLine', 'numlistOpen','numlistClose', 'numlistItemOpen','numlistItemClose','numlistItemLine', 'deflistOpen','deflistClose', 'deflistItem1Open','deflistItem1Close', 'deflistItem2Open','deflistItem2Close', 'bar1','bar2', 'url','urlMark','email','emailMark', 'img', 'tableOpen','tableClose', 'tableRowOpen','tableRowClose','tableRowSep', 'tableCellOpen','tableCellClose','tableCellSep', 'tableTitleCellOpen','tableTitleCellClose','tableTitleCellSep', 'tableTitleRowOpen','tableTitleRowClose', 'tableBorder', 'tableAlignLeft', 'tableAlignCenter', 'tableCellAlignLeft','tableCellAlignRight','tableCellAlignCenter', 'tableColAlignLeft','tableColAlignRight','tableColAlignCenter', 'tableColAlignSep', 'anchor','comment','pageBreak', 'TOC','tocOpen','tocClose', 'bodyOpen','bodyClose', 'EOD' ] alltags = { 'txt': { 'title1' : ' \a' , 'title2' : '\t\a' , 'title3' : '\t\t\a' , 'title4' : '\t\t\t\a' , 'title5' : '\t\t\t\t\a', 'blockQuoteLine' : '\t' , 'listItemOpen' : '- ' , 'numlistItemOpen' : '\a. 
' , 'bar1' : '\a' , 'bar2' : '\a' , 'url' : '\a' , 'urlMark' : '\a (\a)' , 'email' : '\a' , 'emailMark' : '\a (\a)' , 'img' : '[\a]' , }, 'html': { 'paragraphOpen' : '<P>' , 'paragraphClose' : '</P>' , 'title1' : '~A~<H1>\a</H1>' , 'title2' : '~A~<H2>\a</H2>' , 'title3' : '~A~<H3>\a</H3>' , 'title4' : '~A~<H4>\a</H4>' , 'title5' : '~A~<H5>\a</H5>' , 'blockVerbOpen' : '<PRE>' , 'blockVerbClose' : '</PRE>' , 'blockQuoteOpen' : '<BLOCKQUOTE>' , 'blockQuoteClose' : '</BLOCKQUOTE>' , 'fontMonoOpen' : '<CODE>' , 'fontMonoClose' : '</CODE>' , 'fontBoldOpen' : '<B>' , 'fontBoldClose' : '</B>' , 'fontItalicOpen' : '<I>' , 'fontItalicClose' : '</I>' , 'fontUnderlineOpen' : '<U>' , 'fontUnderlineClose' : '</U>' , 'listOpen' : '<UL>' , 'listClose' : '</UL>' , 'listItemOpen' : '<LI>' , 'numlistOpen' : '<OL>' , 'numlistClose' : '</OL>' , 'numlistItemOpen' : '<LI>' , 'deflistOpen' : '<DL>' , 'deflistClose' : '</DL>' , 'deflistItem1Open' : '<DT>' , 'deflistItem1Close' : '</DT>' , 'deflistItem2Open' : '<DD>' , 'bar1' : '<HR NOSHADE SIZE=1>' , 'bar2' : '<HR NOSHADE SIZE=5>' , 'url' : '<A HREF="\a">\a</A>' , 'urlMark' : '<A HREF="\a">\a</A>' , 'email' : '<A HREF="mailto:\a">\a</A>' , 'emailMark' : '<A HREF="mailto:\a">\a</A>' , 'img' :'<IMG ALIGN="~A~" SRC="\a" BORDER="0" ALT="">', 'tableOpen' : '<TABLE~A~ CELLPADDING="4"~B~>', 'tableClose' : '</TABLE>' , 'tableRowOpen' : '<TR>' , 'tableRowClose' : '</TR>' , 'tableCellOpen' : '<TD\a>' , 'tableCellClose' : '</TD>' , 'tableTitleCellOpen' : '<TH>' , 'tableTitleCellClose' : '</TH>' , 'tableBorder' : ' BORDER="1"' , 'tableAlignCenter' : ' ALIGN="center"', 'tableCellAlignRight' : ' ALIGN="right"' , 'tableCellAlignCenter': ' ALIGN="center"', 'anchor' : '<A NAME="\a"></A>\n', 'comment' : '<!-- \a -->' , 'EOD' : '</BODY></HTML>' }, #TIP xhtml inherits all HTML definitions (lowercased) #TIP http://www.w3.org/TR/xhtml1/#guidelines #TIP http://www.htmlref.com/samples/Chapt17/17_08.htm 'xhtml': { 'listItemClose' : '</li>' , 'numlistItemClose' : '</li>' , 'deflistItem2Close' : '</dd>' , 'bar1' : '<hr class="light" />', 'bar2' : '<hr class="heavy" />', 'anchor' : '<a id="\a" name="\a"></a>\n', 'img' :'<img align="~A~" src="\a" border="0" alt=""/>', }, 'sgml': { 'paragraphOpen' : '<p>' , 'title1' : '<sect>\a~A~<p>' , 'title2' : '<sect1>\a~A~<p>' , 'title3' : '<sect2>\a~A~<p>' , 'title4' : '<sect3>\a~A~<p>' , 'title5' : '<sect4>\a~A~<p>' , 'blockVerbOpen' : '<tscreen><verb>' , 'blockVerbClose' : '</verb></tscreen>' , 'blockQuoteOpen' : '<quote>' , 'blockQuoteClose' : '</quote>' , 'fontMonoOpen' : '<tt>' , 'fontMonoClose' : '</tt>' , 'fontBoldOpen' : '<bf>' , 'fontBoldClose' : '</bf>' , 'fontItalicOpen' : '<em>' , 'fontItalicClose' : '</em>' , 'fontUnderlineOpen' : '<bf><em>' , 'fontUnderlineClose' : '</em></bf>' , 'listOpen' : '<itemize>' , 'listClose' : '</itemize>' , 'listItemOpen' : '<item>' , 'numlistOpen' : '<enum>' , 'numlistClose' : '</enum>' , 'numlistItemOpen' : '<item>' , 'deflistOpen' : '<descrip>' , 'deflistClose' : '</descrip>' , 'deflistItem1Open' : '<tag>' , 'deflistItem1Close' : '</tag>' , 'bar1' : '<!-- \a -->' , 'bar2' : '<!-- \a -->' , 'url' : '<htmlurl url="\a" name="\a">' , 'urlMark' : '<htmlurl url="\a" name="\a">' , 'email' : '<htmlurl url="mailto:\a" name="\a">' , 'emailMark' : '<htmlurl url="mailto:\a" name="\a">' , 'img' : '<figure><ph vspace=""><img src="\a">'+\ '</figure>' , 'tableOpen' : '<table><tabular ca="~C~">' , 'tableClose' : '</tabular></table>' , 'tableRowSep' : '<rowsep>' , 'tableCellSep' : '<colsep>' , 'tableColAlignLeft' : 'l' , 
'tableColAlignRight' : 'r' , 'tableColAlignCenter' : 'c' , 'comment' : '<!-- \a -->' , 'anchor' : '<label id="\a">' , 'TOC' : '<toc>' , 'EOD' : '</article>' }, 'tex': { 'title1' : '\n\section*{\a}', 'title2' : '\\subsection*{\a}' , 'title3' : '\\subsubsection*{\a}' , # title 4/5: DIRTY: para+BF+\\+\n 'title4' : '\\paragraph{}\\textbf{\a}\\\\\n', 'title5' : '\\paragraph{}\\textbf{\a}\\\\\n', 'numtitle1' : '\n\section{\a}', 'numtitle2' : '\\subsection{\a}' , 'numtitle3' : '\\subsubsection{\a}' , 'blockVerbOpen' : '\\begin{verbatim}' , 'blockVerbClose' : '\\end{verbatim}' , 'blockQuoteOpen' : '\\begin{quotation}' , 'blockQuoteClose' : '\\end{quotation}' , 'fontMonoOpen' : '\\texttt{' , 'fontMonoClose' : '}' , 'fontBoldOpen' : '\\textbf{' , 'fontBoldClose' : '}' , 'fontItalicOpen' : '\\textit{' , 'fontItalicClose' : '}' , 'fontUnderlineOpen' : '\\underline{' , 'fontUnderlineClose' : '}' , 'listOpen' : '\\begin{itemize}' , 'listClose' : '\\end{itemize}' , 'listItemOpen' : '\\item ' , 'numlistOpen' : '\\begin{enumerate}' , 'numlistClose' : '\\end{enumerate}' , 'numlistItemOpen' : '\\item ' , 'deflistOpen' : '\\begin{description}', 'deflistClose' : '\\end{description}' , 'deflistItem1Open' : '\\item[' , 'deflistItem1Close' : ']' , 'bar1' : '\n\\hrulefill{}\n' , 'bar2' : '\n\\rule{\linewidth}{1mm}\n', 'url' : '\\url{\a}' , 'urlMark' : '\\textit{\a} (\\url{\a})' , 'email' : '\\url{\a}' , 'emailMark' : '\\textit{\a} (\\url{\a})' , 'img' : '\\includegraphics{\a}', 'tableOpen' : '\\begin{center}\\begin{tabular}{|~C~|}', 'tableClose' : '\\end{tabular}\\end{center}', 'tableRowOpen' : '\\hline ' , 'tableRowClose' : ' \\\\' , 'tableCellSep' : ' & ' , 'tableColAlignLeft' : 'l' , 'tableColAlignRight' : 'r' , 'tableColAlignCenter' : 'c' , 'tableColAlignSep' : '|' , 'comment' : '% \a' , 'TOC' : '\\tableofcontents', 'pageBreak' : '\\clearpage', 'EOD' : '\\end{document}' }, 'moin': { 'title1' : '= \a =' , 'title2' : '== \a ==' , 'title3' : '=== \a ===' , 'title4' : '==== \a ====' , 'title5' : '===== \a =====', 'blockVerbOpen' : '{{{' , 'blockVerbClose' : '}}}' , 'blockQuoteLine' : ' ' , 'fontMonoOpen' : '{{{' , 'fontMonoClose' : '}}}' , 'fontBoldOpen' : "'''" , 'fontBoldClose' : "'''" , 'fontItalicOpen' : "''" , 'fontItalicClose' : "''" , 'fontUnderlineOpen' : "__" , 'fontUnderlineClose' : "__" , 'listItemOpen' : ' * ' , 'numlistItemOpen' : ' \a. 
' , 'bar1' : '----' , 'bar2' : '----' , 'url' : '[\a]' , 'urlMark' : '[\a \a]' , 'email' : '[\a]' , 'emailMark' : '[\a \a]' , 'img' : '[\a]' , 'tableRowOpen' : '||' , 'tableCellOpen' : '\a' , 'tableCellClose' : '||' , 'tableTitleCellClose' : '||' , 'tableCellAlignRight' : '<)>' , 'tableCellAlignCenter': '<:>' , 'comment' : '## \a' , 'TOC' : '[[TableOfContents]]' }, 'mgp': { 'paragraphOpen' : '%font "normal", size 5' , 'title1' : '%page\n\n\a\n' , 'title2' : '%page\n\n\a\n' , 'title3' : '%page\n\n\a\n' , 'title4' : '%page\n\n\a\n' , 'title5' : '%page\n\n\a\n' , 'blockVerbOpen' : '%font "mono"' , 'blockVerbClose' : '%font "normal"' , 'blockQuoteOpen' : '%prefix " "' , 'blockQuoteClose' : '%prefix " "' , 'fontMonoOpen' : '\n%cont, font "mono"\n' , 'fontMonoClose' : '\n%cont, font "normal"\n' , 'fontBoldOpen' : '\n%cont, font "normal-b"\n' , 'fontBoldClose' : '\n%cont, font "normal"\n' , 'fontItalicOpen' : '\n%cont, font "normal-i"\n' , 'fontItalicClose' : '\n%cont, font "normal"\n' , 'fontUnderlineOpen' : '\n%cont, fore "cyan"\n' , 'fontUnderlineClose' : '\n%cont, fore "white"\n' , 'listItemLine' : '\t' , 'numlistItemLine' : '\t' , 'deflistItem1Open' : '\t\n%cont, font "normal-b"\n', 'deflistItem1Close' : '\n%cont, font "normal"\n' , 'bar1' : '%bar "white" 5' , 'bar2' : '%pause' , 'url' : '\n%cont, fore "cyan"\n\a' +\ '\n%cont, fore "white"\n' , 'urlMark' : '\a \n%cont, fore "cyan"\n\a'+\ '\n%cont, fore "white"\n' , 'email' : '\n%cont, fore "cyan"\n\a' +\ '\n%cont, fore "white"\n' , 'emailMark' : '\a \n%cont, fore "cyan"\n\a'+\ '\n%cont, fore "white"\n' , 'img' : '\n%~A~\n%newimage "\a"\n%left\n', 'comment' : '%% \a' , 'pageBreak' : '%page\n\n\n' , 'EOD' : '%%EOD' }, # man groff_man ; man 7 groff 'man': { 'paragraphOpen' : '.P' , 'title1' : '.SH \a' , 'title2' : '.SS \a' , 'title3' : '.SS \a' , 'title4' : '.SS \a' , 'title5' : '.SS \a' , 'blockVerbOpen' : '.nf' , 'blockVerbClose' : '.fi\n' , 'blockQuoteOpen' : '.RS' , 'blockQuoteClose' : '.RE' , 'fontBoldOpen' : '\\fB' , 'fontBoldClose' : '\\fR' , 'fontItalicOpen' : '\\fI' , 'fontItalicClose' : '\\fR' , 'listOpen' : '.RS' , 'listItemOpen' : '.IP \(bu 3\n', 'listClose' : '.RE' , 'numlistOpen' : '.RS' , 'numlistItemOpen' : '.IP \a. 
3\n', 'numlistClose' : '.RE' , 'deflistItem1Open' : '.TP\n' , 'bar1' : '\n\n' , 'bar2' : '\n\n' , 'url' : '\a' , 'urlMark' : '\a (\a)', 'email' : '\a' , 'emailMark' : '\a (\a)', 'img' : '\a' , 'tableOpen' : '.TS\n~A~~B~tab(^); ~C~.', 'tableClose' : '.TE' , 'tableRowOpen' : ' ' , 'tableCellSep' : '^' , 'tableAlignCenter' : 'center, ', 'tableBorder' : 'allbox, ', 'tableColAlignLeft' : 'l' , 'tableColAlignRight' : 'r' , 'tableColAlignCenter' : 'c' , 'comment' : '.\\" \a' }, 'pm6': { 'paragraphOpen' : '<@Normal:>' , 'title1' : '\n<@Title1:>\a', 'title2' : '\n<@Title2:>\a', 'title3' : '\n<@Title3:>\a', 'title4' : '\n<@Title4:>\a', 'title5' : '\n<@Title5:>\a', 'blockVerbOpen' : '<@PreFormat:>' , 'blockQuoteLine' : '<@Quote:>' , 'fontMonoOpen' : '<FONT "Lucida Console"><SIZE 9>' , 'fontMonoClose' : '<SIZE$><FONT$>', 'fontBoldOpen' : '<B>' , 'fontBoldClose' : '<P>' , 'fontItalicOpen' : '<I>' , 'fontItalicClose' : '<P>' , 'fontUnderlineOpen' : '<U>' , 'fontUnderlineClose' : '<P>' , 'listOpen' : '<@Bullet:>' , 'listItemOpen' : '\x95\t' , # \x95 == ~U 'numlistOpen' : '<@Bullet:>' , 'numlistItemOpen' : '\x95\t' , 'bar1' : '\a' , 'bar2' : '\a' , 'url' : '<U>\a<P>' , # underline 'urlMark' : '\a <U>\a<P>' , 'email' : '\a' , 'emailMark' : '\a \a' , 'img' : '\a' } } # exceptions for --css-sugar if config['css-sugar'] and config['target'] in ('html','xhtml'): # change just HTML because XHTML inherits it htmltags = alltags['html'] # table with no cellpadding htmltags['tableOpen'] = string.replace( htmltags['tableOpen'], ' CELLPADDING="4"', '') # DIVs htmltags['tocOpen' ] = '<DIV CLASS="toc" ID="toc">' htmltags['tocClose'] = '</DIV>' htmltags['bodyOpen'] = '<DIV CLASS="body" ID="body">' htmltags['bodyClose']= '</DIV>' # make the HTML -> XHTML inheritance xhtml = alltags['html'].copy() for key in xhtml.keys(): xhtml[key] = string.lower(xhtml[key]) # some like HTML tags as lowercase, some don't... 
(headers out) if HTML_LOWER: alltags['html'] = xhtml.copy() xhtml.update(alltags['xhtml']) alltags['xhtml'] = xhtml.copy() # compose the target tags dictionary tags = {} target_tags = alltags[config['target']].copy() for key in keys: tags[key] = '' # create empty keys for key in target_tags.keys(): tags[key] = maskEscapeChar(target_tags[key]) # populate return tags ############################################################################## def getRules(config): "Returns all the target-specific syntax rules" ret = {} allrules = [ # target rules (ON/OFF) 'linkable', # target supports external links 'tableable', # target supports tables 'imglinkable', # target supports images as links 'imgalignable', # target supports image alignment 'imgasdefterm', # target supports image as definition term 'autonumberlist', # target supports numbered lists natively 'autonumbertitle', # target supports numbered titles natively 'parainsidelist', # lists items supports paragraph 'spacedlistitem', # lists support blank lines between items 'listnotnested', # lists cannot be nested 'quotenotnested', # quotes cannot be nested 'verbblocknotescaped', # don't escape specials in verb block 'verbblockfinalescape', # do final escapes in verb block 'escapeurl', # escape special in link URL 'onelinepara', # dump paragraph as a single long line 'tabletitlerowinbold', # manually bold any cell on table titles 'tablecellstrip', # strip extra spaces from each table cell 'barinsidequote', # bars are allowed inside quote blocks 'finalescapetitle', # perform final escapes on title lines 'autotocnewpagebefore', # break page before automatic TOC 'autotocnewpageafter', # break page after automatic TOC 'autotocwithbars', # automatic TOC surrounded by bars # target code beautify (ON/OFF) 'indentverbblock', # add leading spaces to verb block lines 'breaktablecell', # break lines after any table cell 'breaktablelineopen', # break line after opening table line 'notbreaklistopen', # don't break line after opening a new list 'notbreakparaopen', # don't break line after opening a new para 'keepquoteindent', # don't remove the leading TABs on quotes 'keeplistindent', # don't remove the leading spaces on lists 'blankendmotherlist', # append a blank line at the mother list end 'blankendtable', # append a blank line at the table end 'blankendautotoc', # append a blank line at the auto TOC end 'tagnotindentable', # tags must be placed at the line begining # value settings 'listmaxdepth', # maximum depth for lists 'tablecellaligntype' # type of table cell align: cell, column ] rules_bank = { 'txt' : { 'indentverbblock':1, 'spacedlistitem':1, 'parainsidelist':1, 'keeplistindent':1, 'barinsidequote':1, 'autotocwithbars':1, 'blankendmotherlist':1 }, 'html': { 'indentverbblock':1, 'linkable':1, 'escapeurl':1, 'imglinkable':1, 'imgalignable':1, 'imgasdefterm':1, 'autonumberlist':1, 'spacedlistitem':1, 'parainsidelist':1, 'blankendmotherlist':1, 'tableable':1, 'tablecellstrip':1, 'blankendtable':1, 'breaktablecell':1, 'breaktablelineopen':1, 'keeplistindent':1, 'keepquoteindent':1, 'barinsidequote':1, 'autotocwithbars':1, 'tablecellaligntype':'cell' }, #TIP xhtml inherits all HTML rules 'xhtml': { }, 'sgml': { 'linkable':1, 'escapeurl':1, 'autonumberlist':1, 'spacedlistitem':1, 'blankendmotherlist':1, 'tableable':1, 'tablecellstrip':1, 'blankendtable':1, 'blankendautotoc':1, 'quotenotnested':1, 'keeplistindent':1, 'keepquoteindent':1, 'barinsidequote':1, 'finalescapetitle':1, 'tablecellaligntype':'column' }, 'mgp' : { 'blankendmotherlist':1, 
'tagnotindentable':1, 'spacedlistitem':1, 'imgalignable':1, 'autotocnewpagebefore':1, }, 'tex' : { 'autonumberlist':1, 'autonumbertitle':1, 'spacedlistitem':1, 'blankendmotherlist':1, 'tableable':1, 'tablecellstrip':1, 'tabletitlerowinbold':1, 'blankendtable':1, 'verbblocknotescaped':1, 'keeplistindent':1, 'listmaxdepth':4, 'barinsidequote':1, 'finalescapetitle':1, 'autotocnewpageafter':1, 'tablecellaligntype':'column' }, 'moin': { 'spacedlistitem':1, 'linkable':1, 'blankendmotherlist':1, 'keeplistindent':1, 'tableable':1, 'barinsidequote':1, 'blankendtable':1, 'tabletitlerowinbold':1, 'tablecellstrip':1, 'autotocwithbars':1, 'tablecellaligntype':'cell' }, 'man' : { 'spacedlistitem':1, 'indentverbblock':1, 'blankendmotherlist':1, 'tagnotindentable':1, 'tableable':1, 'tablecellaligntype':'column', 'tabletitlerowinbold':1, 'tablecellstrip':1, 'blankendtable':1, 'keeplistindent':0, 'barinsidequote':1, 'parainsidelist':0, }, 'pm6' : { 'keeplistindent':1, 'verbblockfinalescape':1, #TODO add support for these - maybe set a JOINNEXT char and # do it on addLineBreaks() 'notbreaklistopen':1, 'notbreakparaopen':1, 'barinsidequote':1, 'autotocwithbars':1, 'onelinepara':1, } } # exceptions for --css-sugar if config['css-sugar'] and config['target'] in ('html','xhtml'): rules_bank['html']['indentverbblock'] = 0 rules_bank['html']['autotocwithbars'] = 0 # get the target specific rules if config['target'] == 'xhtml': myrules = rules_bank['html'].copy() # inheritance myrules.update(rules_bank['xhtml']) # get XHTML specific else: myrules = rules_bank[config['target']].copy() # populate return dictionary for key in allrules: ret[key] = 0 # reset all ret.update(myrules) # get rules return ret ############################################################################## def getRegexes(): "Returns all the regexes used to find the t2t marks" bank = { 'blockVerbOpen': re.compile(r'^```\s*$'), 'blockVerbClose': re.compile(r'^```\s*$'), 'blockRawOpen': re.compile(r'^"""\s*$'), 'blockRawClose': re.compile(r'^"""\s*$'), 'quote': re.compile(r'^\t+'), '1lineVerb': re.compile(r'^``` (?=.)'), '1lineRaw': re.compile(r'^""" (?=.)'), # mono, raw, bold, italic, underline: # - marks must be glued with the contents, no boundary spaces # - they are greedy, so in ****bold****, turns to <b>**bold**</b> 'fontMono': re.compile( r'``([^\s](|.*?[^\s])`*)``'), 'raw': re.compile( r'""([^\s](|.*?[^\s])"*)""'), 'fontBold': re.compile(r'\*\*([^\s](|.*?[^\s])\**)\*\*'), 'fontItalic': re.compile( r'//([^\s](|.*?[^\s])/*)//'), 'fontUnderline': re.compile( r'__([^\s](|.*?[^\s])_*)__'), 'list': re.compile(r'^( *)(-) (?=[^ ])'), 'numlist': re.compile(r'^( *)(\+) (?=[^ ])'), 'deflist': re.compile(r'^( *)(:) (.*)$'), 'listclose': re.compile(r'^( *)([-+:])\s*$'), 'bar': re.compile(r'^(\s*)([_=-]{20,})\s*$'), 'table': re.compile(r'^ *\|\|? 
'), 'blankline': re.compile(r'^\s*$'), 'comment': re.compile(r'^%'), # auxiliar tag regexes '_imgAlign' : re.compile(r'~A~',re.I), '_tableAlign' : re.compile(r'~A~',re.I), '_anchor' : re.compile(r'~A~',re.I), '_tableBorder' : re.compile(r'~B~',re.I), '_tableColAlign': re.compile(r'~C~',re.I), } # special char to place data on TAGs contents (\a == bell) bank['x'] = re.compile('\a') # %%macroname [ (formatting) ] bank['macros'] = re.compile(r'%%%%(?P<name>%s)\b(\((?P<fmt>.*?)\))?'%( string.join(MACROS.keys(), '|')), re.I) # %%TOC special macro for TOC positioning bank['toc'] = re.compile(r'^ *%%toc\s*$', re.I) # almost complicated title regexes ;) titskel = r'^ *(?P<id>%s)(?P<txt>%s)\1(\[(?P<label>[\w-]*)\])?\s*$' bank[ 'title'] = re.compile(titskel%('[=]{1,5}','[^=](|.*[^=])')) bank['numtitle'] = re.compile(titskel%('[+]{1,5}','[^+](|.*[^+])')) ### complicated regexes begin here ;) # # textual descriptions on --help's style: [...] is optional, | is OR ### first, some auxiliar variables # # [image.EXT] patt_img = r'\[([\w_,.+%$#@!?+~/-]+\.(png|jpe?g|gif|eps|bmp))\]' # link things urlskel = { 'proto' : r'(https?|ftp|news|telnet|gopher|wais)://', 'guess' : r'(www[23]?|ftp)\.', # w/out proto, try to guess 'login' : r'A-Za-z0-9_.-', # for ftp://[email protected] 'pass' : r'[^ @]*', # for ftp://login:[email protected] 'chars' : r'A-Za-z0-9%._/~:,=$@&+-', # %20(space), :80(port), D&D 'anchor': r'A-Za-z0-9%._-', # %nn(encoded) 'form' : r'A-Za-z0-9/%&=+;.,$@*_-', # .,@*_-(as is) 'punct' : r'.,;:!?' } # username [ :password ] @ patt_url_login = r'([%s]+(:%s)?@)?'%(urlskel['login'],urlskel['pass']) # [ http:// ] [ username:password@ ] domain.com [ / ] # [ #anchor | ?form=data ] retxt_url = r'\b(%s%s|%s)[%s]+\b/*(\?[%s]+)?(#[%s]+)?'%( urlskel['proto'],patt_url_login, urlskel['guess'], urlskel['chars'],urlskel['form'],urlskel['anchor']) # filename | [ filename ] #anchor retxt_url_local = r'[%s]+|[%s]*(#[%s]+)'%( urlskel['chars'],urlskel['chars'],urlskel['anchor']) # user@domain [ ?form=data ] patt_email = r'\b[%s]+@([A-Za-z0-9_-]+\.)+[A-Za-z]{2,4}\b(\?[%s]+)?'%( urlskel['login'],urlskel['form']) # saving for future use bank['_urlskel'] = urlskel ### and now the real regexes # bank['email'] = re.compile(patt_email,re.I) # email | url bank['link'] = re.compile(r'%s|%s'%(retxt_url,patt_email), re.I) # \[ label | imagetag url | email | filename \] bank['linkmark'] = re.compile( r'\[(?P<label>%s|[^]]+) (?P<link>%s|%s|%s)\]'%( patt_img, retxt_url, patt_email, retxt_url_local), re.L+re.I) # image bank['img'] = re.compile(patt_img, re.L+re.I) # special things bank['special'] = re.compile(r'^%!\s*') return bank ### END OF regex nightmares ############################################################################## def echo(msg): # for quick debug print '\033[32;1m%s\033[m'%msg def Quit(msg, exitcode=0): print msg sys.exit(exitcode) def Error(msg): sys.stderr.write(_("%s: Error: ")%my_name + "%s\n"%msg) sys.stderr.flush() sys.exit(1) def ShowTraceback(): try: from traceback import print_exc print_exc() ; print ; print except: pass def Message(msg,level): if level <= VERBOSE and not QUIET: prefix = '-'*5 print "%s %s"%(prefix*level, msg) def Debug(msg,color=0,linenr=None): "0gray=init,1red=conf,3yellow=line,6cyan=block,2green=detail,5pink=gui" if QUIET or not DEBUG: return if COLOR_DEBUG: msg = '\033[3%s;1m%s\033[m'%(color,msg) if linenr is not None: msg = "LINE %04d: %s"%(linenr,msg) print "** %s"%msg def Readfile(file, remove_linebreaks=0): if file == '-': try: data = sys.stdin.readlines() except: Error(_('You 
must feed me with data on STDIN!')) else: try: f = open(file); data = f.readlines() ; f.close() except: Error(_("Cannot read file:")+"\n %s"%file) if remove_linebreaks: data = map(lambda x:re.sub('[\n\r]+$','',x), data) Message(_("Readed file (%d lines): %s")%(len(data),file),2) return data def Savefile(file, contents): try: f = open(file, 'wb') except: Error(_("Cannot open file for writing:")+"\n %s"%file) if type(contents) == type([]): doit = f.writelines else: doit = f.write doit(contents) ; f.close() def showdic(dic): for k in dic.keys(): print "%15s : %s" % (k,dic[k]) def dotted_spaces(txt=''): return string.replace(txt,' ','.') def get_rc_path(): "Return the full path for the users' RC file" rc_file = RC # search the RC dir on the specified system variables # TIP: win: http://www.winnetmag.com/Article/ArticleID/23873/23873.html rc_dir_search = ['HOME', 'HOMEPATH'] for var in rc_dir_search: rc_dir = os.environ.get(var) if rc_dir: break if rc_dir: # compose path and return it if the file exists rc_path = os.path.join(rc_dir, rc_file) # on windows, prefix with the drive (%homedrive%: 2k/XP/NT) if sys.platform[:3] == 'win': rc_drive = os.environ.get('HOMEDRIVE') rc_path = os.path.join(rc_drive,rc_path) return rc_path return '' ############################################################################## class CommandLine: """ Command Line class - Masters command line This class checks and extract data from the provided command line. The --long options and flags are taken from the global OPTIONS, FLAGS and ACTIONS dictionaries. The short options are registered here, and also their equivalence to the long ones. METHODS: _compose_short_opts() -> str _compose_long_opts() -> list Compose the valid short and long options list, on the 'getopt' format. parse() -> (opts, args) Call getopt to check and parse the command line. It expects to receive the command line as a list, and without the program name (sys.argv[1:]). get_raw_config() -> [RAW config] Scans command line and convert the data to the RAW config format. See ConfigMaster class to the RAW format description. Optional 'ignore' and 'filter' arguments are used to filter in or out specified keys. compose_cmdline(dict) -> [Command line] Compose a command line list from an already parsed config dictionary, generated from RAW by ConfigMaster(). Use this to compose an optimal command line for a group of options. The get_raw_config() calls parse(), so the tipical use of this class is: raw = CommandLine().get_raw_config(sys.argv[1:]) """ def __init__(self): self.all_options = OPTIONS.keys() self.all_flags = FLAGS.keys() self.all_actions = ACTIONS.keys() # short:long options equivalence self.short_long = { 'h':'help' , 'V':'version', 'n':'enum-title', 'i':'infile' , 'H':'no-headers', 'o':'outfile', 'v':'verbose' , 't':'target' , 'q':'quiet' } # compose valid short and long options data for getopt self.short_opts = self._compose_short_opts() self.long_opts = self._compose_long_opts() def _compose_short_opts(self): "Returns a string like 'hVt:o' with all short options/flags" ret = [] for opt in self.short_long.keys(): long = self.short_long[opt] if long in self.all_options: # is flag or option? 
opt = opt+':' # option: have param ret.append(opt) Debug('Valid SHORT options: %s'%ret) return string.join(ret, '') def _compose_long_opts(self): "Returns a list with all the valid long options/flags" ret = map(lambda x:x+'=', self.all_options) # add = ret.extend(self.all_flags) # flag ON ret.extend(self.all_actions) # acts ret.extend(map(lambda x:'no-'+x, self.all_flags)) # add no-* ret.extend(['no-style']) # turn OFF option ret.extend(['no-encoding']) # turn OFF option ret.extend(['no-outfile']) # turn OFF option Debug('Valid LONG options: %s'%ret) return ret def _tokenize(self, cmd_string=''): "Convert a command line string to a list" #TODO protect quotes contents return string.split(cmd_string) def parse(self, cmdline=[]): "Check/Parse a command line list TIP: no program name!" # get the valid options short, long = self.short_opts, self.long_opts # parse it! try: opts, args = getopt.getopt(cmdline, short, long) except getopt.error, errmsg: Error(_("%s (try --help)")%errmsg) return (opts, args) def get_raw_config(self, cmdline=[], ignore=[], filter=[]): "Returns the options/arguments found as RAW config" if not cmdline: return [] ret = [] # we need lists, not strings if type(cmdline) == type(''): cmdline = self._tokenize(cmdline) Debug("cmdline: %s"%cmdline) opts, args = self.parse(cmdline[:]) # get infile, if any while args: infile = args.pop(0) ret.append(['infile', infile]) # parse all options for name,value in opts: # remove leading - and -- name = re.sub('^--?', '', name) # alias to old mispelled 'suGGar' if name == 'css-suggar': name = 'css-sugar' elif name == 'no-css-suggar': name = 'no-css-sugar' # translate short opt to long if len(name) == 1: name = self.short_long.get(name) # save it ret.append([name, value]) # apply 'ignore' and 'filter' rules (filter is stronger) temp = ret[:] ; ret = [] for name,value in temp: if (not filter and not ignore) or \ (filter and name in filter) or \ (ignore and name not in ignore): ret.append( ['all', name, value] ) # add the original command line string as 'realcmdline' ret.append( ['all', 'realcmdline', cmdline] ) return ret def compose_cmdline(self, conf={}, no_check=0): "compose a full (and diet) command line from CONF dict" if not conf: return [] args = [] dft_options = OPTIONS.copy() cfg = conf.copy() valid_opts = self.all_options + self.all_flags use_short = {'no-headers':'H', 'enum-title':'n'} # remove useless options if not no_check and cfg.get('toc-only'): if cfg.has_key('no-headers'): del cfg['no-headers'] if cfg.has_key('outfile'): del cfg['outfile'] # defaults to STDOUT if cfg.get('target') == 'txt': del cfg['target'] # already default args.append('--toc-only') # must be the first del cfg['toc-only'] # add target type if cfg.has_key('target'): args.append('-t '+cfg['target']) del cfg['target'] # add other options for key in cfg.keys(): if key not in valid_opts: continue # may be a %!setting if key in ['outfile','infile']: continue # later val = cfg[key] if not val: continue # default values are useless on cmdline if val == dft_options.get(key): continue # -short format if key in use_short.keys(): args.append('-'+use_short[key]) continue # --long format if key in self.all_flags: # add --option args.append('--'+key) else: # add --option=value args.append('--%s=%s'%(key,val)) # the outfile using -o if cfg.has_key('outfile') and \ cfg['outfile'] != dft_options.get('outfile'): args.append('-o '+cfg['outfile']) # place input file(s) always at the end if cfg.has_key('infile'): args.append(string.join(cfg['infile'],' ')) # return as a nice 
list Debug("Diet command line: %s"%string.join(args,' '), 1) return args ############################################################################## class SourceDocument: """ SourceDocument class - scan document structure, extract data It knows about full files. It reads a file and identify all the areas begining (Head,Conf,Body). With this info it can extract each area contents. Note: the original line break is removed. DATA: self.arearef - Save Head, Conf, Body init line number self.areas - Store the area names which are not empty self.buffer - The full file contents (with NO \\r, \\n) METHODS: get() - Access the contents of an Area. Example: config = SourceDocument(file).get('conf') split() - Get all the document Areas at once. Example: head, conf, body = SourceDocument(file).split() RULES: * The document parts are sequential: Head, Conf and Body. * One ends when the next begins. * The Conf Area is optional, so a document can have just Head and Body Areas. These are the Areas limits: - Head Area: the first three lines - Body Area: from the first valid text line to the end - Conf Area: the comments between Head and Body Areas Exception: If the first line is blank, this means no header info, so the Head Area is just the first line. """ def __init__(self, filename=''): self.areas = ['head','conf','body'] self.arearef = [] self.areas_fancy = '' self.filename = filename self.buffer = [] if filename: self.scan(filename) def split(self): "Returns all document parts, splitted into lists." return self.get('head'), self.get('conf'), self.get('body') def get(self, areaname): "Returns head|conf|body contents from self.buffer" # sanity if areaname not in self.areas: return [] if not self.buffer : return [] # go get it bufini = 1 bufend = len(self.buffer) if areaname == 'head': ini = bufini end = self.arearef[1] or self.arearef[2] or bufend elif areaname == 'conf': ini = self.arearef[1] end = self.arearef[2] or bufend elif areaname == 'body': ini = self.arearef[2] end = bufend else: Error("Unknown Area name '%s'"%areaname) lines = self.buffer[ini:end] # make sure head will always have 3 lines while areaname == 'head' and len(lines) < 3: lines.append('') return lines def scan(self, filename): "Run through source file and identify head/conf/body areas" Debug("source file: %s"%filename) Message(_("Loading source document"),1) buf = Readfile(filename, remove_linebreaks=1) if len(buf) == 0: Error(_('The input file is empty: %s')%filename) cfg_parser = ConfigLines().parse_line buf.insert(0, '') # text start at pos 1 ref = [1,4,0] if not string.strip(buf[1]): # no header ref[0] = 0 ; ref[1] = 2 rgx = getRegexes() for i in range(ref[1],len(buf)): # find body init: if string.strip(buf[i]) and ( # ... not blank and buf[i][0] != '%' or # ... not comment or rgx['macros'].match(buf[i]) or # ... %%macro rgx['toc'].match(buf[i]) or # ... %%toc cfg_parser(buf[i],'include')[1]): # ... 
%!include ref[2] = i ; break if ref[1] == ref[2]: ref[1] = 0 # no conf area for i in 0,1,2: # del !existent if ref[i] >= len(buf): ref[i] = 0 # title-only if not ref[i]: self.areas[i] = '' Debug('Head,Conf,Body start line: %s'%ref) self.arearef = ref # save results self.buffer = buf # fancyness sample: head conf body (1 4 8) self.areas_fancy = "%s (%s)"%( string.join(self.areas), string.join(map(str, map(lambda x:x or '', ref)))) Message(_("Areas found: %s")%self.areas_fancy, 2) def get_raw_config(self): "Handy method to get the CONF area RAW config (if any)" if not self.areas.count('conf'): return [] Message(_("Scanning source document CONF area"),1) raw = ConfigLines( file=self.filename, lines=self.get('conf'), first_line=self.arearef[1]).get_raw_config() Debug("document raw config: %s"%raw, 1) return raw ############################################################################## class ConfigMaster: """ ConfigMaster class - the configuration wizard This class is the configuration master. It knows how to handle the RAW and PARSED config format. It also performs the sanity checkings for a given configuration. DATA: self.raw - Stores the config on the RAW format self.parsed - Stores the config on the PARSED format self.defaults - Stores the default values for all keys self.off - Stores the OFF values for all keys self.multi - List of keys which can have multiple values self.numeric - List of keys which value must be a number self.incremental - List of keys which are incremental RAW FORMAT: The RAW format is a list of lists, being each mother list item a full configuration entry. Any entry is a 3 item list, on the following format: [ TARGET, KEY, VALUE ] Being a list, the order is preserved, so it's easy to use different kinds of configs, as CONF area and command line, respecting the precedence. The special target 'all' is used when no specific target was defined on the original config. PARSED FORMAT: The PARSED format is a dictionary, with all the 'key : value' found by reading the RAW config. The self.target contents matters, so this dictionary only contains the target's config. The configs of other targets are ignored. The CommandLine and ConfigLines classes have the get_raw_config() method which convert the configuration found to the RAW format. Just feed it to parse() and get a brand-new ready-to-use config dictionary. 
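# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the RAW format described above
# is just a list of [target, key, value] triples, and parsing reduces it to
# a flat {key: value} dict for one target. A minimal standalone model, with
# the _demo_* name being this sketch's own invention:
#
def _demo_parse_raw(raw, target='html'):
    "Toy reduction of RAW triples to a PARSED dict, later entries win"
    parsed = {}
    for targ, key, val in raw:
        if targ in (target, 'all'):   # configs for other targets are ignored
            parsed[key] = val
    return parsed

# _demo_parse_raw([['all','enum-title',1], ['txt','toc',1]]) == {'enum-title': 1}
# ---------------------------------------------------------------------------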
    Example:
        >>> raw = CommandLine().get_raw_config(['-n', '-H'])
        >>> print raw
        [['all', 'enum-title', ''], ['all', 'no-headers', '']]
        >>> parsed = ConfigMaster(raw).parse()
        >>> print parsed
        {'enum-title': 1, 'headers': 0}
    """
    def __init__(self, raw=[], target=''):
        self.raw = raw
        self.target = target
        self.parsed = {}
        self.dft_options = OPTIONS.copy()
        self.dft_flags = FLAGS.copy()
        self.dft_actions = ACTIONS.copy()
        self.dft_settings = SETTINGS.copy()
        self.defaults = self._get_defaults()
        self.off = self._get_off()
        self.multi = ['infile', 'options','preproc','postproc']
        self.incremental = ['verbose']
        self.numeric = ['toc-level','split']

    def _get_defaults(self):
        "Get the default values for all config/options/flags"
        empty = {}
        for kw in CONFIG_KEYWORDS: empty[kw] = ''
        empty.update(self.dft_options)
        empty.update(self.dft_flags)
        empty.update(self.dft_actions)
        empty.update(self.dft_settings)
        empty['realcmdline'] = ''  # internal use only
        empty['sourcefile'] = ''   # internal use only
        return empty

    def _get_off(self):
        "Turns OFF all the config/options/flags"
        off = {}
        for key in self.defaults.keys():
            kind = type(self.defaults[key])
            if kind == type(9):
                off[key] = 0
            elif kind == type(''):
                off[key] = ''
            elif kind == type([]):
                off[key] = []
            else:
                Error('ConfigMaster: %s: Unknown type'%key)
        return off

    def _check_target(self):
        "Checks if the target is already defined. If not, do it"
        if not self.target:
            self.target = self.find_value('target')

    def get_target_raw(self):
        "Returns the raw config for self.target or 'all'"
        ret = []
        self._check_target()
        for entry in self.raw:
            if entry[0] in [self.target, 'all']:
                ret.append(entry)
        return ret

    def add(self, key, val):
        "Adds the key:value pair to the config dictionary (if needed)"
        # %!options
        if key == 'options':
            ignoreme = self.dft_actions.keys() + ['target']
            raw_opts = CommandLine().get_raw_config(
                val, ignore=ignoreme)
            for target, key, val in raw_opts:
                self.add(key, val)
            return
        # the no- prefix turns OFF this key
        if key[:3] == 'no-':
            key = key[3:]              # remove prefix
            val = self.off.get(key)    # turn key OFF
        # is this key valid?
        if key not in self.defaults.keys():
            Debug('Bogus Config %s:%s'%(key,val),1)
            return
        # is this value the default one?
        if val == self.defaults.get(key):
            # if default value, remove previous key:val
            if self.parsed.has_key(key):
                del self.parsed[key]
            # nothing more to do
            return
        # flags ON come empty. we'll add the 1 value now
        if val == '' and \
           key in self.dft_flags.keys()+self.dft_actions.keys():
            val = 1
        # multi value or single?
        if key in self.multi:
            # first one? start a new list
            if not self.parsed.has_key(key):
                self.parsed[key] = []
            self.parsed[key].append(val)
        # incremental value?
so let's add it elif key in self.incremental: self.parsed[key] = (self.parsed.get(key) or 0) + val else: self.parsed[key] = val fancykey = dotted_spaces("%12s"%key) Message(_("Added config %s : %s")%(fancykey,val),3) def get_outfile_name(self, config={}): "Dirname is the same for {in,out}file" infile, outfile = config['sourcefile'], config['outfile'] if infile == STDIN and not outfile: outfile = STDOUT if not outfile and (infile and config.get('target')): basename = re.sub('\.(txt|t2t)$','',infile) outfile = "%s.%s"%(basename, config['target']) Debug(" infile: '%s'"%infile , 1) Debug("outfile: '%s'"%outfile, 1) return outfile def sanity(self, config, gui=0): "Basic config sanity checkings" if not config: return {} target = config.get('target') # --toc-only doesn't require target specification if not target and config.get('toc-only'): target = 'txt' # on GUI, some checkings are skipped if not gui: # we *need* a target if not target: Error(_('No target specified (try --help)')+\ '\n\n'+\ _('Maybe trying to convert an old v1.x file?')) # and of course, an infile also if not config['infile']: Error(_('Missing input file (try --help)')) # is the target valid? if not TARGETS.count(target): Error(_("Invalid target '%s' (try --help)" )%target) # ensure all keys are present empty = self.defaults.copy() ; empty.update(config) config = empty.copy() # check integers options for key in config.keys(): if key in self.numeric: try: config[key] = int(config[key]) except: Error(_('--%s value must be a number' )%key) # check split level value if config['split'] not in [0,1,2]: Error(_('Option --split must be 0, 1 or 2')) # --toc-only is stronger than others if config['toc-only']: config['headers'] = 0 config['toc'] = 0 config['split'] = 0 config['gui'] = 0 config['outfile'] = config['outfile'] or STDOUT # splitting is disable for now (future: HTML only, no STDOUT) config['split'] = 0 # restore target config['target'] = target # set output file name config['outfile'] = self.get_outfile_name(config) # checking suicide if config['sourcefile'] == config['outfile'] and \ config['outfile'] != STDOUT and not gui: Error(_("Input and Output files are the same: %s")%( config['outfile'])) return config def parse(self): "Returns the parsed config for the current target" raw = self.get_target_raw() for target, key, value in raw: self.add(key, value) Message(_("Added the following keys: %s")%string.join( self.parsed.keys(),', '),2) return self.parsed.copy() def find_value(self, key='', target=''): "Scans ALL raw config to find the desired key" ret = [] # scan and save all values found for targ, k, val in self.raw: if targ in [target, 'all'] and k == key: ret.append(val) if not ret: return '' # if not multi value, return only the last found if key in self.multi: return ret else : return ret[-1] ######################################################################## class ConfigLines: """ ConfigLines class - the config file data extractor This class reads and parse the config lines on the %!key:val format, converting it to RAW config. It deals with user config file (RC file), source document CONF area and %!includeconf directives. Call it passing a file name or feed the desired config lines. Then just call the get_raw_config() method and wait to receive the full config data on the RAW format. This method also follows the possible %!includeconf directives found on the config lines. 
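# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the %!key:val lines handled by
# this class follow a "%!name(target): value" shape. A cut-down matcher,
# assuming only lowercase names and an optional (target) group; the _DEMO_*
# and _demo_* names are this sketch's own invention:
#
import re as _re_demo
_DEMO_CFGLINE = _re_demo.compile(
    r'^%!\s*([a-z]+)\s*(?:\(([a-z]*)\))?\s*:\s*(\S.*?)\s*$')

def _demo_parse_cfgline(line):
    "Returns (target, key, value) or None, mirroring parse_line's contract"
    m = _DEMO_CFGLINE.match(line)
    if not m: return None
    name, target, value = m.group(1), m.group(2) or 'all', m.group(3)
    return (target, name, value)

# _demo_parse_cfgline('%!target: html')        -> ('all', 'target', 'html')
# _demo_parse_cfgline('%!options(txt): --toc') -> ('txt', 'options', '--toc')
# ---------------------------------------------------------------------------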
Example: raw = ConfigLines(file=".txt2tagsrc").get_raw_config() The parse_line() method is also useful to be used alone, to identify and tokenize a single config line. For example, to get the %!include command components, on the source document BODY: target, key, value = ConfigLines().parse_line(body_line) """ def __init__(self, file='', lines=[], first_line=1): self.file = file or 'NOFILE' self.lines = lines self.first_line = first_line def load_lines(self): "Make sure we've loaded the file contents into buffer" if not self.lines and not self.file: Error("ConfigLines: No file or lines provided") if not self.lines: self.lines = self.read_config_file(self.file) def read_config_file(self, filename=''): "Read a Config File contents, aborting on invalid line" if not filename: return [] errormsg = _("Invalid CONFIG line on %s")+"\n%03d:%s" lines = Readfile(filename, remove_linebreaks=1) # sanity: try to find invalid config lines for i in range(len(lines)): line = string.rstrip(lines[i]) if not line: continue # empty if line[0] != '%': Error(errormsg%(filename,i+1,line)) return lines def include_config_file(self, file=''): "Perform the %!includeconf action, returning RAW config" if not file: return [] # current dir relative to the current file (self.file) current_dir = os.path.dirname(self.file) file = os.path.join(current_dir, file) # read and parse included config file contents lines = self.read_config_file(file) return ConfigLines(file=file, lines=lines).get_raw_config() def get_raw_config(self): "Scan buffer and extract all config as RAW (including includes)" ret = [] self.load_lines() first = self.first_line for i in range(len(self.lines)): line = self.lines[i] Message(_("Processing line %03d: %s")%(first+i,line),2) target, key, val = self.parse_line(line) if not key: continue # no config on this line if key == 'includeconf': more_raw = self.include_config_file(val) ret.extend(more_raw) Message(_("Finished Config file inclusion: %s" )%(val),2) else: ret.append([target, key, val]) Message(_("Added %s")%key,3) return ret def parse_line(self, line='', keyname='', target=''): "Detects %!key:val config lines and extract data from it" empty = ['', '', ''] if not line: return empty no_target = ['target', 'includeconf'] re_name = keyname or '[a-z]+' re_target = target or '[a-z]*' cfgregex = re.compile(""" ^%%!\s* # leading id with opt spaces (?P<name>%s)\s* # config name (\((?P<target>%s)\))? # optional target spec inside () \s*:\s* # key:value delimiter with opt spaces (?P<value>\S.+?) 
# config value \s*$ # rstrip() spaces and hit EOL """%(re_name,re_target), re.I+re.VERBOSE) prepostregex = re.compile(""" # ---[ PATTERN ]--- ^( "([^"]*)" # "double quoted" or | '([^']*)' # 'single quoted' or | ([^\s]+) # single_word ) \s+ # separated by spaces # ---[ REPLACE ]--- ( "([^"]*)" # "double quoted" or | '([^']*)' # 'single quoted' or | (.*) # anything ) \s*$ """, re.VERBOSE) guicolors = re.compile("^([^\s]+\s+){3}[^\s]+") # 4 tokens match = cfgregex.match(line) if not match: return empty name = string.lower(match.group('name') or '') target = string.lower(match.group('target') or 'all') value = match.group('value') # NO target keywords: force all targets if name in no_target: target = 'all' # special config for GUI colors if name == 'guicolors': valmatch = guicolors.search(value) if not valmatch: return empty value = re.split('\s+', value) # Special config with two quoted values (%!preproc: "foo" 'bar') if name in ['preproc','postproc']: valmatch = prepostregex.search(value) if not valmatch: return empty getval = valmatch.group patt = getval(2) or getval(3) or getval(4) or '' repl = getval(6) or getval(7) or getval(8) or '' value = (patt, repl) return [target, name, value] ############################################################################## class MaskMaster: "(Un)Protect important structures from escaping and formatting" def __init__(self): self.linkmask = '@@@LINK@@@' self.monomask = '@@@MONO@@@' self.macromask = '@@@MACRO@@@' self.rawmask = '@@@RAW@@@' self.tocmask = '@@@TOC@@@' self.macroman = MacroMaster() self.reset() def reset(self): self.linkbank = [] self.monobank = [] self.macrobank = [] self.rawbank = [] def mask(self, line=''): global AUTOTOC # protect raw text while regex['raw'].search(line): txt = regex['raw'].search(line).group(1) txt = doEscape(TARGET,txt) self.rawbank.append(txt) line = regex['raw'].sub(self.rawmask,line,1) # protect pre-formatted font text while regex['fontMono'].search(line): txt = regex['fontMono'].search(line).group(1) txt = doEscape(TARGET,txt) self.monobank.append(txt) line = regex['fontMono'].sub(self.monomask,line,1) # protect macros while regex['macros'].search(line): txt = regex['macros'].search(line).group() self.macrobank.append(txt) line = regex['macros'].sub(self.macromask,line,1) # protect TOC location while regex['toc'].search(line): line = regex['toc'].sub(self.tocmask,line) AUTOTOC = 0 # protect URLs and emails while regex['linkmark'].search(line) or \ regex['link' ].search(line): # try to match plain or named links match_link = regex['link'].search(line) match_named = regex['linkmark'].search(line) # define the current match if match_link and match_named: # both types found, which is the first? 
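# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): this protect-and-restore
# scheme swaps fragile spans for a fixed placeholder, pushing each span onto
# a bank; after formatting, the bank is replayed in order, one placeholder
# per stored item. The _demo_* names are this sketch's own invention:
#
def _demo_mask(line, pattern, bank, placeholder='@@@X@@@'):
    "Store every regex match in `bank`, leaving placeholders behind"
    while pattern.search(line):
        bank.append(pattern.search(line).group(1))
        line = pattern.sub(placeholder, line, 1)
    return line

def _demo_undo(line, bank, placeholder='@@@X@@@'):
    "Put the stored spans back, first-in first-out"
    for item in bank:
        line = line.replace(placeholder, item, 1)
    return line
# ---------------------------------------------------------------------------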
m = match_link if match_named.start() < match_link.start(): m = match_named else: # just one type found, we're fine m = match_link or match_named # extract link data and apply mask if m == match_link: # plain link link = m.group() label = '' link_re = regex['link'] else: # named link link = m.group('link') label = string.rstrip(m.group('label')) link_re = regex['linkmark'] line = link_re.sub(self.linkmask,line,1) # save link data to the link bank self.linkbank.append((label, link)) return line def undo(self, line): # url & email for label,url in self.linkbank: link = get_tagged_link(label, url) line = string.replace(line, self.linkmask, link, 1) # expand macros for macro in self.macrobank: macro = self.macroman.expand(macro) line = string.replace(line, self.macromask, macro,1) # expand verb for mono in self.monobank: open,close = TAGS['fontMonoOpen'],TAGS['fontMonoClose'] tagged = open+mono+close line = string.replace(line,self.monomask,tagged,1) # expand raw for raw in self.rawbank: line = string.replace(line,self.rawmask,raw,1) return line ############################################################################## class TitleMaster: "Title things" def __init__(self): self.count = ['',0,0,0,0,0] self.toc = [] self.level = 0 self.kind = '' self.txt = '' self.label = '' self.tag = '' self.count_id = '' self.user_labels = {} self.anchor_count = 0 self.anchor_prefix = 'toc' def add(self, line): "Parses a new title line." if not line: return self._set_prop(line) self._set_count_id() self._set_label() self._save_toc_info() def _save_toc_info(self): "Save TOC info, used by self.dump_marked_toc()" self.toc.append((self.level, self.count_id, self.txt , self.label )) def _set_prop(self, line=''): "Extract info from original line and set data holders." # detect title type (numbered or not) id = string.lstrip(line)[0] if id == '=': kind = 'title' elif id == '+': kind = 'numtitle' else: Error("Unknown Title ID '%s'"%id) # extract line info match = regex[kind].search(line) level = len(match.group('id')) txt = string.strip(match.group('txt')) label = match.group('label') # parse info & save if CONF['enum-title']: kind = 'numtitle' # force self.tag = TAGS[kind+`level`] or TAGS['title'+`level`] self.kind = kind self.level = level self.txt = txt self.label = label def _set_count_id(self): "Compose and save the title count identifier (if needed)." count_id = '' if self.kind == 'numtitle' and not rules['autonumbertitle']: # manually increase title count self.count[self.level] = self.count[self.level] +1 # reset sublevels count (if any) max_levels = len(self.count) if self.level < max_levels-1: for i in range(self.level+1, max_levels): self.count[i] = 0 # compose count id from hierarchy for i in range(self.level): count_id= "%s%d."%(count_id, self.count[i+1]) self.count_id = count_id def _set_label(self): "Compose and save title label, used by anchors." # remove invalid chars from label set by user self.label = re.sub('[^A-Za-z0-9_-]', '', self.label or '') # generate name as 15 first :alnum: chars #TODO how to translate safely accented chars to plain? #self.label = re.sub('[^A-Za-z0-9]', '', self.txt)[:15] # 'tocN' label - sequential count, ignoring 'toc-level' #self.label = self.anchor_prefix + str(len(self.toc)+1) def _get_tagged_anchor(self): "Return anchor if user defined a label, or TOC is on." 
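# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the count_id composed by
# _set_count_id above is the classic multi-level section counter. Bumping
# level N zeroes every deeper level, and the id is the dot-joined prefix up
# to N. The _demo_* name is this sketch's own invention:
#
def _demo_bump(counters, level):
    "counters[1..] hold per-level counts; returns the new '1.2.' style id"
    counters[level] = counters[level] + 1
    for i in range(level+1, len(counters)):  # reset sublevels
        counters[i] = 0
    return ''.join(['%d.' % counters[i] for i in range(1, level+1)])

# c = [0,0,0,0]; _demo_bump(c,1) -> '1.'; _demo_bump(c,2) -> '1.1.'
# _demo_bump(c,1) -> '2.'  (and the level-2 counter is back to zero)
# ---------------------------------------------------------------------------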
        ret = ''
        label = self.label
        if CONF['toc'] and self.level <= CONF['toc-level']:
            # this count is needed because self.toc stores all
            # titles, regardless of the 'toc-level' setting,
            # so we can't use the self.toc length to number anchors
            self.anchor_count = self.anchor_count + 1
            # autonumber label (if needed)
            label = label or '%s%s'%(
                self.anchor_prefix, self.anchor_count)
        if label and TAGS['anchor']:
            ret = regex['x'].sub(label,TAGS['anchor'])
        return ret

    def _get_full_title_text(self):
        "Returns the full title contents, already escaped."
        ret = self.txt
        # insert count_id (if any) before text
        if self.count_id:
            ret = '%s %s'%(self.count_id, ret)
        # escape specials
        ret = doEscape(TARGET, ret)
        # some targets need final escapes on title lines
        # it's here because there is a 'continue' after title
        if rules['finalescapetitle']:
            ret = doFinalEscape(TARGET, ret)
        return ret

    def get(self):
        "Returns the tagged title as a list."
        ret = []
        # maybe some anchoring before?
        anchor = self._get_tagged_anchor()
        self.tag = regex['_anchor'].sub(anchor, self.tag)
        ### compose & escape title text (TOC uses unescaped)
        full_title = self._get_full_title_text()
        # finish title, adding "underline" on TXT target
        tagged = regex['x'].sub(full_title, self.tag)
        if TARGET == 'txt':
            ret.append('')  # blank line before
            ret.append(tagged)
            ret.append(regex['x'].sub('='*len(full_title),self.tag))
            ret.append('')  # blank line after
        else:
            ret.append(tagged)
        return ret

    def dump_marked_toc(self, max_level=99):
        "Dumps all TOC items as a valid t2t markup list"
        #TODO maybe use quote+linebreaks instead of lists
        ret = []
        toc_count = 1
        for level, count_id, txt, label in self.toc:
            if level > max_level: continue  # ignore
            indent = ' '*level
            id_txt = string.lstrip('%s %s'%(count_id, txt))
            label = label or self.anchor_prefix+`toc_count`
            toc_count = toc_count + 1
            # TOC will have links
            if TAGS['anchor']:
                # TOC is more readable with master topics
                # not linked at the number. This is an idea
                # stolen from Windows .CHM help files
                if CONF['enum-title'] and level == 1:
                    tocitem = '%s+ [""%s"" #%s]'%(
                        indent, txt, label)
                else:
                    tocitem = '%s- [""%s"" #%s]'%(
                        indent, id_txt, label)
            # no links on TOC, just text
            else:
                # man doesn't reformat TOC lines, cool!
                if TARGET in ['txt', 'man']:
                    tocitem = '%s""%s""' %(indent, id_txt)
                else:
                    tocitem = '%s- ""%s""'%(indent, id_txt)
            ret.append(tocitem)
        return ret

##############################################################################

#TODO check all this table mess
# handles TABLE lines, using the properties returned by parse_row
# the BLOCK table() method then kludges in, swapping the cells
# for the parsed ones
class TableMaster:
    def __init__(self, line=''):
        self.rows = []
        self.border = 0
        self.align = 'Left'
        self.cellalign = []
        if line:
            prop = self.parse_row(line)
            self.border = prop['border']
            self.align = prop['align']
            self.cellalign = prop['cellalign']

    def _get_open_tag(self):
        topen = TAGS['tableOpen']
        tborder = TAGS['tableBorder']
        talign = TAGS['tableAlign'+self.align]
        calignsep = TAGS['tableColAlignSep']
        calign = ''
        # the first line defines if the table has a border or not
        if not self.border: tborder = ''
        # set the columns alignment
        if rules['tablecellaligntype'] == 'column':
            calign = map(lambda x: TAGS['tableColAlign%s'%x],
                self.cellalign)
            calign = string.join(calign, calignsep)
        # align full table, set border and Column align (if any)
        topen = regex['_tableAlign'   ].sub(talign , topen)
        topen = regex['_tableBorder'  ].sub(tborder, topen)
        topen = regex['_tableColAlign'].sub(calign , topen)
        # tex table spec, border or not: {|l|c|r|} , {lcr}
        if calignsep and not self.border:
            # remove the cell align separator
            topen = string.replace(topen, calignsep, '')
        return topen

    def _get_cell_align(self, cells):
        ret = []
        for cell in cells:
            align = 'Left'
            if string.strip(cell):
                if cell[0] == ' ' and cell[-1] == ' ':
                    align = 'Center'
                elif cell[0] == ' ':
                    align = 'Right'
            ret.append(align)
        return ret

    def _tag_cells(self, rowdata):
        row = []
        cells = rowdata['cells']
        open  = TAGS['tableCellOpen']
        close = TAGS['tableCellClose']
        sep   = TAGS['tableCellSep']
        calign = map(lambda x: TAGS['tableCellAlign'+x],
            rowdata['cellalign'])
        # is this a title row?
        if rowdata['title']:
            open  = TAGS['tableTitleCellOpen' ] or open
            close = TAGS['tableTitleCellClose'] or close
            sep   = TAGS['tableTitleCellSep'  ] or sep
        # should we break the line on *each* table cell?
        if rules['breaktablecell']:
            close = close+'\n'
        # cells pre-processing
        if rules['tablecellstrip']:
            cells = map(lambda x: string.strip(x), cells)
        if rowdata['title'] and rules['tabletitlerowinbold']:
            cells = map(lambda x: enclose_me('fontBold',x), cells)
        # add cell BEGIN/END tags
        for cell in cells:
            # insert cell align into open tag (if cell is alignable)
            if rules['tablecellaligntype'] == 'cell':
                copen = string.replace(open,'\a',calign.pop(0))
            else:
                copen = open
            row.append(copen + cell + close)
        # maybe there are cell separators?
        return string.join(row, sep)

    def add_row(self, cells):
        self.rows.append(cells)

    def parse_row(self, line):
        # default table properties
        ret = {'border':0,'title':0,'align':'Left',
               'cells':[],'cellalign':[]}
        # detect table align (and remove the spaces mark)
        if line[0] == ' ': ret['align'] = 'Center'
        line = string.lstrip(line)
        # detect the title mark
        if line[1] == '|': ret['title'] = 1
        # delete trailing spaces after the last cell border
        line = re.sub('\|\s*$','|', line)
        # detect (and delete) the border mark (and leading space)
        if line[-1] == '|': ret['border'] = 1 ; line = line[:-2]
        # delete the table mark
        line = regex['table'].sub('', line)
        # split cells
        ret['cells'] = string.split(line, ' | ')
        # find the cells' align
        ret['cellalign'] = self._get_cell_align(ret['cells'])
        Debug('Table Prop: %s' % ret, 2)
        return ret

    def dump(self):
        open  = self._get_open_tag()
        rows  = self.rows
        close = TAGS['tableClose']
        rowopen     = TAGS['tableRowOpen' ]
        rowclose    = TAGS['tableRowClose']
        rowsep      = TAGS['tableRowSep'  ]
        titrowopen  = TAGS['tableTitleRowOpen' ] or rowopen
        titrowclose = TAGS['tableTitleRowClose'] or rowclose
        if rules['breaktablelineopen']:
            rowopen = rowopen + '\n'
            titrowopen = titrowopen + '\n'
        # tex gotchas
        if TARGET == 'tex':
            if not self.border:
                rowopen = titrowopen = ''
            else:
                close = rowopen + close
        # now we tag all the table cells on each row
        #tagged_cells = map(lambda x: self._tag_cells(x), rows)  #!py15
        tagged_cells = []
        for cell in rows:
            tagged_cells.append(self._tag_cells(cell))
        # add row separator tags between lines
        tagged_rows = []
        if rowsep:
            #!py15
            #tagged_rows = map(lambda x:x+rowsep, tagged_cells)
            for cell in tagged_cells:
                tagged_rows.append(cell+rowsep)
            # remove the last rowsep, because the table is over
            tagged_rows[-1] = string.replace(
                tagged_rows[-1], rowsep, '')
        # add row BEGIN/END tags for each line
        else:
            for rowdata in rows:
                if rowdata['title']:
                    o,c = titrowopen, titrowclose
                else:
                    o,c = rowopen, rowclose
                row = tagged_cells.pop(0)
                tagged_rows.append(o + row + c)
        fulltable = [open] + tagged_rows + [close]
        if rules['blankendtable']:
            fulltable.append('')
        return fulltable

##############################################################################

class BlockMaster:
    "TIP: use blockin/out to add/del holders"
    def __init__(self):
        self.BLK = []
        self.HLD = []
        self.PRP = []
        self.depth = 0
        self.last = ''
        self.tableparser = None
        self.contains = {
            'para'    :['passthru','raw'],
            'verb'    :[],
            'table'   :[],
            'raw'     :[],
            'passthru':[],
            'quote'   :['quote','passthru','raw'],
            'list'    :['list' ,'numlist' ,'deflist','para','verb',
                        'raw' ,'passthru'],
            'numlist' :['list' ,'numlist' ,'deflist','para','verb',
                        'raw' ,'passthru'],
            'deflist' :['list' ,'numlist' ,'deflist','para','verb',
                        'raw' ,'passthru']
        }
        self.allblocks = self.contains.keys()

    def block(self):
        if not self.BLK: return ''
        return self.BLK[-1]

    def isblock(self, name=''):
        return self.block() == name

    def prop(self, key):
        if not self.PRP: return ''
        return self.PRP[-1].get(key) or ''

    def propset(self, key, val):
        self.PRP[-1][key] = val
        #Debug('BLOCK prop ++: %s->%s'%(key,repr(val)), 1)
        #Debug('BLOCK props: %s'%(repr(self.PRP)), 1)

    def hold(self):
        if not self.HLD: return []
        return self.HLD[-1]

    def holdadd(self, line):
        if self.block()[-4:] == 'list': line = [line]
        self.HLD[-1].append(line)
        Debug('HOLD add: %s'%repr(line), 5)
        Debug('FULL HOLD: %s'%self.HLD, 2)

    def holdaddsub(self, line):
        self.HLD[-1][-1].append(line)
        Debug('HOLD addsub: %s'%repr(line), 5)
        Debug('FULL HOLD: %s'%self.HLD, 2)

    def holdextend(self, lines):
        if self.block()[-4:] == 'list': lines = [lines]
        self.HLD[-1].extend(lines)
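# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): _get_cell_align above infers
# alignment purely from the spaces the user typed around each cell's text.
# The same rule, stated on its own; the _demo_* name is this sketch's own
# invention:
#
def _demo_cell_align(cell):
    "' x '->Center, ' x'->Right, anything else->Left (blank cells too)"
    if cell.strip():
        if cell.startswith(' ') and cell.endswith(' '): return 'Center'
        if cell.startswith(' '): return 'Right'
    return 'Left'

# map(_demo_cell_align, ['name', ' age ', ' total'])
#   -> ['Left', 'Center', 'Right']
# ---------------------------------------------------------------------------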
Debug('HOLD extend: %s'%repr(lines), 5) Debug('FULL HOLD: %s'%self.HLD, 2) def blockin(self, block): ret = [] if block not in self.allblocks: Error("Invalid block '%s'"%block) # first, let's close other possible open blocks while self.block() and block not in self.contains[self.block()]: ret.extend(self.blockout()) # now we can gladly add this new one self.BLK.append(block) self.HLD.append([]) self.PRP.append({}) if block == 'table': self.tableparser = TableMaster() # deeper and deeper self.depth = len(self.BLK) Debug('block ++ (%s): %s' % (block,self.BLK), 6) return ret def blockout(self): if not self.BLK: Error('No block to pop') self.last = self.BLK.pop() tagged = getattr(self, self.last)() parsed = self.HLD.pop() self.PRP.pop() self.depth = len(self.BLK) if self.last == 'table': del self.tableparser # inserting a nested block into mother if self.block(): if self.block()[-4:] == 'list': self.HLD[-1][-1].append(tagged) else: self.HLD[-1].append(tagged) tagged = [] # reset. mother will have it all Debug('block -- (%s): %s' % (self.last,self.BLK), 6) Debug('RELEASED (%s): %s' % (self.last,parsed), 6) if tagged: Debug('DUMPED: %s'%tagged, 2) return tagged def _last_escapes(self, line): return doFinalEscape(TARGET, line) def _get_escaped_hold(self): ret = [] for line in self.hold(): linetype = type(line) if linetype == type(''): ret.append(self._last_escapes(line)) elif linetype == type([]): ret.extend(line) else: Error("BlockMaster: Unknown HOLD item type:" " %s"%linetype) return ret def _remove_twoblanks(self, lastitem): if len(lastitem) > 1 and lastitem[-2:] == ['','']: return lastitem[:-2] return lastitem def passthru(self): return self.hold() def raw(self): lines = self.hold() return map(lambda x: doEscape(TARGET, x), lines) def para(self): tagged = [] open = TAGS['paragraphOpen'] close = TAGS['paragraphClose'] lines = self._get_escaped_hold() # open (or not) paragraph if not open+close and self.last == 'para': pass # avoids multiple blank lines else: tagged.append(open) # pagemaker likes a paragraph as a single long line if rules['onelinepara']: tagged.append(string.join(lines,' ')) # others are normal :) else: tagged.extend(lines) tagged.append(close) # very very very very very very very very very UGLY fix # needed because <center> can't appear inside <p> try: if len(lines) == 1 and \ TARGET in ('html', 'xhtml') and \ re.match('^\s*<center>.*</center>\s*$', lines[0]): tagged = [lines[0]] except: pass return tagged def verb(self): "Verbatim lines are not masked, so there's no need to unmask" tagged = [] tagged.append(TAGS['blockVerbOpen']) for line in self.hold(): if self.prop('mapped') == 'table': line = MacroMaster().expand(line) if not rules['verbblocknotescaped']: line = doEscape(TARGET,line) if rules['indentverbblock']: line = ' '+line if rules['verbblockfinalescape']: line = doFinalEscape(TARGET, line) tagged.append(line) #TODO maybe use if not TAGS['blockVerbClose'] if TARGET != 'pm6': tagged.append(TAGS['blockVerbClose']) return tagged def table(self): # rewrite all table cells by the unmasked and escaped data lines = self._get_escaped_hold() for i in range(len(lines)): cells = string.split(lines[i], SEPARATOR) self.tableparser.rows[i]['cells'] = cells return self.tableparser.dump() def quote(self): tagged = [] myre = regex['quote'] open = TAGS['blockQuoteOpen'] # block based close = TAGS['blockQuoteClose'] qline = TAGS['blockQuoteLine'] # line based indent = tagindent = '\t'*self.depth if rules['tagnotindentable']: tagindent = '' if not rules['keepquoteindent']: indent = '' if 
open: tagged.append(tagindent+open) # open block for item in self.hold(): if type(item) == type([]): tagged.extend(item) # subquotes else: item = myre.sub('', item) # del TABs if rules['barinsidequote']: item = get_tagged_bar(item) item = self._last_escapes(item) item = qline*self.depth + item tagged.append(indent+item) # quote line if close: tagged.append(tagindent+close) # close block return tagged def deflist(self): return self.list('deflist') def numlist(self): return self.list('numlist') def list(self, name='list'): tagged = [] items = self.hold() indent = self.prop('indent') tagindent = indent listopen = TAGS.get(name+'Open') listclose = TAGS.get(name+'Close') listline = TAGS.get(name+'ItemLine') itemcount = 0 if rules['tagnotindentable']: tagindent = '' if not rules['keeplistindent']: indent = '' if name == 'deflist': itemopen = TAGS[name+'Item1Open'] itemclose = TAGS[name+'Item2Close'] itemsep = TAGS[name+'Item1Close']+\ TAGS[name+'Item2Open'] else: itemopen = TAGS[name+'ItemOpen'] itemclose = TAGS[name+'ItemClose'] itemsep = '' # ItemLine: number of leading chars identifies list depth if listline: itemopen = listline*self.depth # dirty fix for mgp if name == 'numlist': itemopen = itemopen + '\a. ' # remove two-blanks from list ending mark, to avoid <p> items[-1] = self._remove_twoblanks(items[-1]) # open list (not nestable lists are only opened at mother) if listopen and not \ (rules['listnotnested'] and BLOCK.depth != 1): tagged.append(tagindent+listopen) # tag each list item (multine items) itemopenorig = itemopen for item in items: # add "manual" item count for noautonum targets itemcount = itemcount + 1 if name == 'numlist' and not rules['autonumberlist']: n = str(itemcount) itemopen = regex['x'].sub(n, itemopenorig) del n item[0] = self._last_escapes(item[0]) if name == 'deflist': term, rest = string.split(item[0],SEPARATOR,1) item[0] = rest if not item[0]: del item[0] # to avoid <p> tagged.append(tagindent+itemopen+term+itemsep) else: fullitem = tagindent+itemopen tagged.append(string.replace( item[0], SEPARATOR, fullitem)) del item[0] # process next lines for this item (if any) for line in item: if type(line) == type([]): # sublist inside tagged.extend(line) else: line = self._last_escapes(line) # blank lines turns to <p> if not line and rules['parainsidelist']: line = string.rstrip(indent +\ TAGS['paragraphOpen']+\ TAGS['paragraphClose']) if not rules['keeplistindent']: line = string.lstrip(line) tagged.append(line) # close item (if needed) if itemclose: tagged.append(tagindent+itemclose) # close list (not nestable lists are only closed at mother) if listclose and not \ (rules['listnotnested'] and BLOCK.depth != 1): tagged.append(tagindent+listclose) if rules['blankendmotherlist'] and BLOCK.depth == 1: tagged.append('') return tagged ############################################################################## class MacroMaster: def __init__(self, config={}): self.name = '' self.config = config or CONF self.infile = self.config['sourcefile'] self.outfile = self.config['outfile'] self.currdate = time.localtime(time.time()) self.rgx = regex.get('macros') or getRegexes()['macros'] self.fileinfo = { 'infile': None, 'outfile': None } self.dft_fmt = MACROS def walk_file_format(self, fmt): "Walks the %%{in/out}file format string, expanding the % flags" i = 0; ret = '' # counter/hold while i < len(fmt): # char by char c = fmt[i]; i = i + 1 if c == '%': # hot char! 
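# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): this %-flag scanning is a tiny
# printf-style loop. The same idea with a plain lookup table, assuming a
# dict of single-letter expansions; the _demo_* name is this sketch's own
# invention:
#
def _demo_expand_flags(fmt, table):
    "Expand %X via table; '%%' yields a literal '%', unknown flags pass through"
    out, i = '', 0
    while i < len(fmt):
        c = fmt[i]; i = i + 1
        if c == '%' and i < len(fmt):
            flag = fmt[i]; i = i + 1
            if flag == '%':
                out = out + '%'                       # %% -> literal %
            else:
                out = out + table.get(flag, '%'+flag) # unknown: keep as-is
        else:
            out = out + c
    return out

# _demo_expand_flags('%f (%e)', {'f':'doc.t2t', 'e':'t2t'}) -> 'doc.t2t (t2t)'
# ---------------------------------------------------------------------------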
if i == len(fmt): # % at the end ret = ret + c break c = fmt[i]; i = i + 1 # read next ret = ret + self.expand_file_flag(c) else: ret = ret +c # common char return ret def expand_file_flag(self, flag): "%f: filename %F: filename (w/o extension)" "%d: dirname %D: dirname (only parent dir)" "%p: file path %e: extension" info = self.fileinfo[self.name] # get dict if flag == '%': x = '%' # %% -> % elif flag == 'f': x = info['name'] elif flag == 'F': x = re.sub('\.[^.]*$','',info['name']) elif flag == 'd': x = info['dir'] elif flag == 'D': x = os.path.split(info['dir'])[-1] elif flag == 'p': x = info['path'] elif flag == 'e': x = re.search('.(\.([^.]+))?$',info['name'] ).group(2) or '' #TODO simplier way for %e ? else : x = '%'+flag # false alarm return x def set_file_info(self, macroname): if self.fileinfo.get(macroname): return # already done file = getattr(self, self.name) # self.infile if file == STDOUT: dir = ''; path = name = STDOUT else: path = os.path.abspath(file) dir = os.path.dirname(path) name = os.path.basename(path) self.fileinfo[macroname] = {'path':path,'dir':dir,'name':name} def expand(self, line=''): "Expand all macros found on the line" while self.rgx.search(line): m = self.rgx.search(line) name = self.name = m.group('name') fmt = m.group('fmt') or self.dft_fmt.get(name) if name == 'date': txt = time.strftime(fmt,self.currdate) elif name == 'mtime': if self.infile == STDIN: fdate = self.currdate else: mtime = os.path.getmtime(self.infile) fdate = time.localtime(mtime) txt = time.strftime(fmt,fdate) elif name in ['infile','outfile']: self.set_file_info(name) txt = self.walk_file_format(fmt) else: Error("Unknown macro name '%s'"%name) line = self.rgx.sub(txt,line,1) return line ############################################################################## def dumpConfig(source_raw, parsed_config): onoff = {1:_('ON'), 0:_('OFF')} data = [ (_('RC file') , RC_RAW ), (_('source document'), source_raw ), (_('command line') , CMDLINE_RAW) ] # first show all RAW data found for label, cfg in data: print _('RAW config for %s')%label for target,key,val in cfg: target = '(%s)'%target key = dotted_spaces("%-14s"%key) val = val or _('ON') print ' %-8s %s: %s'%(target,key,val) print # then the parsed results of all of them print _('Full PARSED config') keys = parsed_config.keys() ; keys.sort() # sorted for key in keys: val = parsed_config[key] # filters are the last if key in ['preproc', 'postproc']: continue # flag beautifier if key in FLAGS.keys()+ACTIONS.keys(): val = onoff.get(val) or val # list beautifier if type(val) == type([]): if key == 'options': sep = ' ' else : sep = ', ' val = string.join(val, sep) print "%25s: %s"%(dotted_spaces("%-14s"%key),val) print print _('Active filters') for filter in ['preproc','postproc']: for rule in parsed_config.get(filter) or []: print "%25s: %s -> %s"%( dotted_spaces("%-14s"%filter),rule[0],rule[1]) def get_file_body(file): "Returns all the document BODY lines" return process_source_file(file, noconf=1)[1][2] def finish_him(outlist, config): "Writing output to screen or file" outfile = config['outfile'] outlist = unmaskEscapeChar(outlist) # apply PostProc filters if config['postproc']: filters = compile_filters(config['postproc'], _('Invalid PostProc filter regex')) postoutlist = [] for line in outlist: for rgx,repl in filters: line = rgx.sub(repl, line) postoutlist.append(line) outlist = postoutlist[:] if outfile == STDOUT: if GUI: return outlist, config else: for line in outlist: print line else: Savefile(outfile, addLineBreaks(outlist)) if not 
GUI and not QUIET: print _('%s wrote %s')%(my_name,outfile) if config['split']: if not QUIET: print "--- html..." sgml2html = 'sgml2html -s %s -l %s %s'%( config['split'],config['lang'] or lang,outfile) if not QUIET: print "Running system command:", sgml2html os.system(sgml2html) def toc_inside_body(body, toc, config): ret = [] if AUTOTOC: return body # nothing to expand toc_mark = MaskMaster().tocmask # expand toc mark with TOC contents for line in body: if string.count(line, toc_mark): # toc mark found if config['toc']: ret.extend(toc) # include if --toc else: pass # or remove %%toc line else: ret.append(line) # common line return ret def toc_tagger(toc, config): "Convert t2t-marked TOC (it is a list) to target-tagged TOC" ret = [] # tag if TOC-only TOC "by hand" (target don't have a TOC tag) if config['toc-only'] or (config['toc'] and not TAGS['TOC']): fakeconf = config.copy() fakeconf['headers'] = 0 fakeconf['toc-only'] = 0 fakeconf['mask-email'] = 0 fakeconf['preproc'] = [] fakeconf['postproc'] = [] fakeconf['css-sugar'] = 0 ret,foo = convert(toc, fakeconf) set_global_config(config) # restore config # target TOC is a tag elif config['toc'] and TAGS['TOC']: ret = [TAGS['TOC']] return ret def toc_formatter(toc, config): "Formats TOC for automatic placement between headers and body" if config['toc-only']: return toc # no formatting needed if not config['toc'] : return [] # TOC disabled ret = toc # TOC open/close tags (if any) if TAGS['tocOpen' ]: ret.insert(0, TAGS['tocOpen']) if TAGS['tocClose']: ret.append(TAGS['tocClose']) # autotoc specific formatting if AUTOTOC: if rules['autotocwithbars']: # TOC between bars para = TAGS['paragraphOpen']+TAGS['paragraphClose'] bar = regex['x'].sub('-'*72,TAGS['bar1']) tocbar = [para, bar, para] ret = tocbar + ret + tocbar if rules['blankendautotoc']: # blank line after TOC ret.append('') if rules['autotocnewpagebefore']: # page break before TOC ret.insert(0,TAGS['pageBreak']) if rules['autotocnewpageafter']: # page break after TOC ret.append(TAGS['pageBreak']) return ret def doHeader(headers, config): if not config['headers']: return [] if not headers: headers = ['','',''] target = config['target'] if not HEADER_TEMPLATE.has_key(target): Error("doheader: Unknow target '%s'"%target) if target in ['html','xhtml'] and config.get('css-sugar'): template = string.split(HEADER_TEMPLATE[target+'css'], '\n') else: template = string.split(HEADER_TEMPLATE[target], '\n') head_data = {'STYLE':'', 'ENCODING':''} for key in head_data.keys(): val = config.get(string.lower(key)) if key == 'ENCODING': val = get_encoding_string(val, target) head_data[key] = val # parse header contents for i in 0,1,2: # expand macros contents = MacroMaster(config=config).expand(headers[i]) # Escapes - on tex, just do it if any \tag{} present if target != 'tex' or \ (target == 'tex' and re.search(r'\\\w+{', contents)): contents = doEscape(target, contents) head_data['HEADER%d'%(i+1)] = contents Debug("Header Data: %s"%head_data, 1) # scan for empty dictionary keys # if found, scan template lines for that key reference # if found, remove the reference # if there isn't any other key reference on the same line, remove it for key in head_data.keys(): if head_data.get(key): continue for line in template: if string.count(line, '%%(%s)s'%key): sline = string.replace(line, '%%(%s)s'%key, '') if not re.search(r'%\([A-Z0-9]+\)s', sline): template.remove(line) # populate template with data template = string.join(template, '\n') % head_data return string.split(template, '\n') def 
doCommentLine(txt): # the -- string ends a (h|sg|xht)ml comment :( txt = maskEscapeChar(txt) if string.count(TAGS['comment'], '--') and \ string.count(txt, '--'): txt = re.sub('-(?=-)', r'-\\', txt) if TAGS['comment']: return regex['x'].sub(txt, TAGS['comment']) return '' def doFooter(config): if not config['headers']: return [] ret = [] target = config['target'] cmdline = config['realcmdline'] typename = target if target == 'tex': typename = 'LaTeX2e' ppgd = '%s code generated by %s %s (%s)'%( typename,my_name,my_version,my_url) cmdline = 'cmdline: %s %s'%(my_name, string.join(cmdline, ' ')) ret.append('\n'+doCommentLine(ppgd)) ret.append(doCommentLine(cmdline)) ret.append(TAGS['EOD']) return ret def doEscape(target,txt): "Target-specific special escapes. Apply *before* insert any tag." if target in ['html','sgml','xhtml']: txt = re.sub('&','&amp;',txt) txt = re.sub('<','&lt;',txt) txt = re.sub('>','&gt;',txt) if target == 'sgml': txt = re.sub('\xff','&yuml;',txt) # "+y elif target == 'pm6': txt = re.sub('<','<\#60>',txt) elif target == 'mgp': txt = re.sub('^%',' %',txt) # add leading blank to avoid parse elif target == 'man': txt = re.sub("^([.'])", '\\&\\1',txt) # command ID txt = string.replace(txt,ESCCHAR, ESCCHAR+'e') # \e elif target == 'tex': # mark literal \ to be changed to $\backslash$ later txt = string.replace( txt, ESCCHAR, '@@LaTeX-escaping-SUX@@') txt = re.sub('([#$&%{}])', ESCCHAR+r'\1' , txt) # \% txt = re.sub('([~^])' , ESCCHAR+r'\1{}', txt) # \~{} txt = re.sub('([<|>])' , r'$\1$', txt) # $>$ txt = string.replace(txt, '@@LaTeX-escaping-SUX@@', maskEscapeChar(r'$\backslash$')) # TIP the _ is escaped at the end return txt # TODO man: where - really needs to be escaped? def doFinalEscape(target, txt): "Last escapes of each line" if target == 'pm6' : txt = string.replace(txt,ESCCHAR+'<',r'<\#92><') elif target == 'man' : txt = string.replace(txt, '-', r'\-') elif target == 'tex' : txt = string.replace(txt, '_', r'\_') elif target == 'sgml': txt = string.replace(txt, '[', '&lsqb;') return txt def EscapeCharHandler(action, data): "Mask/Unmask the Escape Char on the given string" if not string.strip(data): return data if action not in ['mask','unmask']: Error("EscapeCharHandler: Invalid action '%s'"%action) if action == 'mask': return string.replace(data,'\\',ESCCHAR) else: return string.replace(data,ESCCHAR,'\\') def maskEscapeChar(data): "Replace any Escape Char \ with a text mask (Input: str or list)" if type(data) == type([]): return map(lambda x: EscapeCharHandler('mask', x), data) return EscapeCharHandler('mask',data) def unmaskEscapeChar(data): "Undo the Escape char \ masking (Input: str or list)" if type(data) == type([]): return map(lambda x: EscapeCharHandler('unmask', x), data) return EscapeCharHandler('unmask',data) def addLineBreaks(list): "use LB to respect sys.platform" ret = [] for line in list: line = string.replace(line,'\n',LB) # embedded \n's ret.append(line+LB) # add final line break return ret def compile_filters(filters, errmsg='Filter'): if filters: for i in range(len(filters)): patt,repl = filters[i] try: rgx = re.compile(patt) except: Error("%s: '%s'"%(errmsg, patt)) filters[i] = (rgx,repl) return filters def enclose_me(tagname, txt): return TAGS.get(tagname+'Open') + txt + TAGS.get(tagname+'Close') def beautify_me(name, line): "where name is: bold, italic or underline" name = 'font%s' % string.capitalize(name) open = TAGS['%sOpen'%name] close = TAGS['%sClose'%name] txt = r'%s\1%s'%(open, close) line = regex[name].sub(txt,line) return line def 
get_tagged_link(label, url): ret = '' target = CONF['target'] image_re = regex['img'] # set link type if regex['email'].match(url): linktype = 'email' else: linktype = 'url'; # escape specials from TEXT parts label = doEscape(target,label) # escape specials from link URL if rules['linkable'] and rules['escapeurl']: url = doEscape(target, url) # if not linkable, the URL is plain text, that needs escape if not rules['linkable']: if target == 'tex': url = re.sub('^#', '\#', url) # ugly, but compile else: url = doEscape(target,url) # adding protocol to guessed link guessurl = '' if linktype == 'url' and \ re.match(regex['_urlskel']['guess'], url): if url[0] == 'w': guessurl = 'http://' +url else : guessurl = 'ftp://' +url # not link aware targets -> protocol is useless if not rules['linkable']: guessurl = '' # simple link (not guessed) if not label and not guessurl: if CONF['mask-email'] and linktype == 'email': # do the email mask feature (no TAGs, just text) url = string.replace(url,'@',' (a) ') url = string.replace(url,'.',' ') url = "<%s>" % url if rules['linkable']: url = doEscape(target, url) ret = url else: # just add link data to tag tag = TAGS[linktype] ret = regex['x'].sub(url,tag) # named link or guessed simple link else: # adjusts for guessed link if not label: label = url # no protocol if guessurl : url = guessurl # with protocol # image inside link! if image_re.match(label): if rules['imglinkable']: # get image tag label = parse_images(label) else: # img@link !supported label = "(%s)"%image_re.match(label).group(1) # putting data on the right appearance order if rules['linkable']: urlorder = [url, label] # link before label else: urlorder = [label, url] # label before link # add link data to tag (replace \a's) ret = TAGS["%sMark"%linktype] for data in urlorder: ret = regex['x'].sub(data,ret,1) return ret def parse_deflist_term(line): "Extract and parse definition list term contents" img_re = regex['img'] term = regex['deflist'].search(line).group(3) # mask image inside term as (image.jpg), where not supported if not rules['imgasdefterm'] and img_re.search(term): while img_re.search(term): imgfile = img_re.search(term).group(1) term = img_re.sub('(%s)'%imgfile, term, 1) #TODO tex: escape ] on term. \], \rbrack{} and \verb!]! 
don't work :( return term def get_tagged_bar(line): m = regex['bar'].search(line) if not m: return line txt = m.group(2) # set bar type if txt[0] == '=': bar = TAGS['bar2'] else : bar = TAGS['bar1'] # to avoid comment tag confusion like <!-- ------ --> if string.count(TAGS['comment'], '--'): txt = string.replace(txt,'--','__') # tag line return regex['x'].sub(txt, bar) def get_image_align(line): "Return the image (first found) align for the given line" # first clear marks that can mess align detection line = re.sub(SEPARATOR+'$', '', line) # remove deflist sep line = re.sub('^'+SEPARATOR, '', line) # remove list sep line = re.sub('^[\t]+' , '', line) # remove quote mark # get image position on the line m = regex['img'].search(line) ini = m.start() ; head = 0 end = m.end() ; tail = len(line) # the align detection algorithm if ini == head and end != tail: align = 'left' # ^img + text$ elif ini != head and end == tail: align = 'right' # ^text + img$ else : align = 'middle' # default align # some special cases if BLOCK.isblock('table'): align = 'middle' # ignore when table if TARGET == 'mgp' and align == 'middle': align = 'center' return align # reference: http://www.iana.org/assignments/character-sets # http://www.drclue.net/F1.cgi/HTML/META/META.html def get_encoding_string(enc, target): if not enc: return '' # target specific translation table translate = { 'tex': { # missing: ansinew , applemac , cp437 , cp437de , cp865 'us-ascii' : 'ascii', 'windows-1250': 'cp1250', 'windows-1252': 'cp1252', 'ibm850' : 'cp850', 'ibm852' : 'cp852', 'iso-8859-1' : 'latin1', 'iso-8859-2' : 'latin2', 'iso-8859-3' : 'latin3', 'iso-8859-4' : 'latin4', 'iso-8859-5' : 'latin5', 'iso-8859-9' : 'latin9', 'koi8-r' : 'koi8-r' } } # normalization enc = re.sub('(?i)(us[-_]?)?ascii|us|ibm367','us-ascii' , enc) enc = re.sub('(?i)(ibm|cp)?85([02])' ,'ibm85\\2' , enc) enc = re.sub('(?i)(iso[_-]?)?8859[_-]?' ,'iso-8859-' , enc) enc = re.sub('iso-8859-($|[^1-9]).*' ,'iso-8859-1', enc) # apply translation table try: enc = translate[target][string.lower(enc)] except: pass return enc ############################################################################## ##MerryChristmas,IdontwanttofighttonightwithyouImissyourbodyandIneedyourlove## ############################################################################## def process_source_file(file, noconf=0): """ Find and Join all the configuration available for a source file. No sanity checkings are done on this step. It also extracts the source document parts into separate holders. The config scan order is: 1. The user configuration file (i.e. $HOME/.txt2tagsrc) 2. The source document's CONF area 3. The command line options The return data is a tuple of two items: 1. The parsed config dictionary 2. The document's parts, as a (head, conf, body) tuple All the conversion process will be based on the data and configuration returned by this function. The source files is readed on this step only. """ source = SourceDocument(file) head, conf, body = source.split() Message(_("Source document contents stored"),2) if not noconf: # read document config source_raw = source.get_raw_config() # join all the config directives found, then parse it full_raw = RC_RAW + source_raw + CMDLINE_RAW Message(_("Parsing and saving all config found (%03d items)")%( len(full_raw)),1) full_parsed = ConfigMaster(full_raw).parse() # add manually the filemane to the conf dic full_parsed['sourcefile'] = file # maybe should we dump the config found? 
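# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the precedence rule described
# in the docstring above falls out of plain list concatenation, since later
# RAW entries overwrite earlier ones when parsed. RC file first, CONF area
# next, command line last; the _demo_* name is this sketch's own invention:
#
def _demo_merge(rc, conf_area, cmdline):
    "Last writer wins, so the command line beats the other two"
    merged = {}
    for targ, key, val in rc + conf_area + cmdline:
        merged[key] = val
    return merged

# _demo_merge([['all','target','txt']], [['all','target','html']],
#             [['all','target','tex']]) == {'target': 'tex'}
# ---------------------------------------------------------------------------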
if full_parsed.get('dump-config'): dumpConfig(source_raw, full_parsed) sys.exit() # okay, all done Debug("FULL config for this file: %s"%full_parsed, 1) else: full_parsed = {} return full_parsed, (head,conf,body) def get_infiles_config(infiles): """ Find and Join into a single list, all configuration available for each input file. This function is supposed to be the very first one to be called, before any processing. """ ret = [] if not infiles: return [] for infile in infiles: ret.append((process_source_file(infile))) return ret def convert_this_files(configs): global CONF for myconf,doc in configs: # multifile support target_head = [] target_toc = [] target_body = [] target_foot = [] source_head, source_conf, source_body = doc myconf = ConfigMaster().sanity(myconf) # compose the target file Headers #TODO escape line before? #TODO see exceptions by tex and mgp Message(_("Composing target Headers"),1) target_head = doHeader(source_head, myconf) # parse the full marked body into tagged target first_body_line = (len(source_head) or 1)+ len(source_conf) + 1 Message(_("Composing target Body"),1) target_body, marked_toc = convert(source_body, myconf, firstlinenr=first_body_line) # make TOC (if needed) Message(_("Composing target TOC"),1) tagged_toc = toc_tagger(marked_toc, myconf) target_toc = toc_formatter(tagged_toc, myconf) target_body = toc_inside_body(target_body, target_toc, myconf) if not AUTOTOC and not myconf['toc-only']: target_toc = [] # compose the target file Footer Message(_("Composing target Footer"),1) target_foot = doFooter(myconf) # finally, we have our document outlist = target_head + target_toc + target_body + target_foot # if on GUI, abort before finish_him # else, write results to file or STDOUT if GUI: return outlist, myconf else: Message(_("Saving results to the output file"),1) finish_him(outlist, myconf) def parse_images(line): "Tag all images found" while regex['img'].search(line) and TAGS['img'] != '[\a]': txt = regex['img'].search(line).group(1) tag = TAGS['img'] # HTML, XHTML and mgp! 
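# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): convert_this_files() above
# assembles every output document as four plain list segments in a fixed
# order; the _demo_* name is this sketch's own invention:
#
def _demo_assemble(head, toc, body, foot):
    "The whole target document is just list concatenation"
    return head + toc + body + foot

# _demo_assemble(['<html>'], [], ['<p>hi</p>'], ['</html>'])
#   -> ['<html>', '<p>hi</p>', '</html>']
# ---------------------------------------------------------------------------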
if rules['imgalignable']: align = get_image_align(line) # add align on tag tag = regex['_imgAlign'].sub(align, tag, 1) # dirty fix to allow centered solo images if align == 'middle' and TARGET in ['html','xhtml']: rest = regex['img'].sub('',line,1) if re.match('^\s+$', rest): tag = "<center>%s</center>" %tag if TARGET == 'tex': tag = re.sub(r'\\b',r'\\\\b',tag) line = regex['img'].sub(tag,line,1) line = regex['x'].sub(txt,line,1) return line def add_inline_tags(line): # beautifiers for beauti in ['Bold', 'Italic', 'Underline']: if regex['font%s'%beauti].search(line): line = beautify_me(beauti, line) line = parse_images(line) return line def get_include_contents(file, path=''): "Parses %!include: value and extract file contents" ids = {'`':'verb', '"':'raw', "'":'passthru' } id = 't2t' # set include type and remove identifier marks mark = file[0] if mark in ids.keys(): if file[:2] == file[-2:] == mark*2: id = ids[mark] # set type file = file[2:-2] # remove marks # handle remote dir execution filepath = os.path.join(path, file) # read included file contents lines = Readfile(filepath, remove_linebreaks=1) # default txt2tags marked text, just BODY matters if id == 't2t': lines = get_file_body(filepath) lines.insert(0, '%%INCLUDED(%s) starts here: %s'%(id,file)) lines.append('%%INCLUDED(%s) ends here: %s'%(id,file)) return id, lines def set_global_config(config): global CONF, TAGS, regex, rules, TARGET CONF = config TAGS = getTags(CONF) rules = getRules(CONF) regex = getRegexes() TARGET = config['target'] # save for buggy functions that need global def convert(bodylines, config, firstlinenr=1): global BLOCK set_global_config(config) target = config['target'] BLOCK = BlockMaster() MASK = MaskMaster() TITLE = TitleMaster() ret = [] f_lastwasblank = 0 # compiling all PreProc regexes pre_filter = compile_filters( CONF['preproc'], _('Invalid PreProc filter regex')) # let's mark it up! 
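# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the PreProc/PostProc filters
# compiled above are just (regex, replacement) pairs applied to every line,
# in order; the _demo_* and _re_flt names are this sketch's own invention:
#
import re as _re_flt

def _demo_apply_filters(lines, filters):
    "filters: list of (pattern_string, replacement) pairs"
    compiled = [(_re_flt.compile(p), r) for p, r in filters]
    out = []
    for line in lines:
        for rgx, repl in compiled:
            line = rgx.sub(repl, line)
        out.append(line)
    return out

# _demo_apply_filters(['color x'], [('color', 'colour')]) -> ['colour x']
# ---------------------------------------------------------------------------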
linenr = firstlinenr-1 lineref = 0 while lineref < len(bodylines): # defaults MASK.reset() results_box = '' untouchedline = bodylines[lineref] line = re.sub('[\n\r]+$','',untouchedline) # del line break # apply PreProc filters if pre_filter: for rgx,repl in pre_filter: line = rgx.sub(repl, line) line = maskEscapeChar(line) # protect \ char linenr = linenr +1 lineref = lineref +1 Debug(repr(line), 3, linenr) # heavy debug: show each line # any NOT table line (or comment), closes an open table if ( BLOCK.isblock('table') or ( BLOCK.isblock('verb') and BLOCK.prop('mapped') == 'table' ) ) \ and not regex['table'].search(line) \ and not regex['comment'].search(line): ret.extend(BLOCK.blockout()) # any NOT quote line (or comment) closes all open quotes if BLOCK.isblock('quote') \ and not regex['quote'].search(line) \ and not regex['comment'].search(line): while BLOCK.isblock('quote'): ret.extend(BLOCK.blockout()) #-------------------------[ Raw Text ]---------------------- # we're already on a raw block if BLOCK.block() == 'raw': # closing raw if regex['blockRawClose'].search(line): ret.extend(BLOCK.blockout()) continue # normal raw-inside line BLOCK.holdadd(line) continue # detecting raw block init if regex['blockRawOpen'].search(line): ret.extend(BLOCK.blockin('raw')) continue # one line verb-formatted text if regex['1lineRaw'].search(line): ret.extend(BLOCK.blockin('raw')) line = regex['1lineRaw'].sub('',line) BLOCK.holdadd(line) ret.extend(BLOCK.blockout()) continue #-----------------[ Verbatim (PRE-formatted) ]-------------- #TIP we'll never support beautifiers inside verbatim # we're already on a verb block if BLOCK.block() == 'verb': # closing verb if regex['blockVerbClose'].search(line): ret.extend(BLOCK.blockout()) continue # normal verb-inside line BLOCK.holdadd(line) continue # detecting verb block init if regex['blockVerbOpen'].search(line): ret.extend(BLOCK.blockin('verb')) f_lastwasblank = 0 continue # one line verb-formatted text if regex['1lineVerb'].search(line): ret.extend(BLOCK.blockin('verb')) line = regex['1lineVerb'].sub('',line) BLOCK.holdadd(line) ret.extend(BLOCK.blockout()) f_lastwasblank = 0 continue # tables are mapped to verb when target is not table-aware if not rules['tableable'] and regex['table'].search(line): if not BLOCK.isblock('verb'): ret.extend(BLOCK.blockin('verb')) BLOCK.propset('mapped', 'table') BLOCK.holdadd(line) continue #---------------------[ blank lines ]----------------------- if regex['blankline'].search(line): # close open paragraph if BLOCK.isblock('para'): ret.extend(BLOCK.blockout()) f_lastwasblank = 1 continue # close all open quotes while BLOCK.isblock('quote'): ret.extend(BLOCK.blockout()) # closing all open lists if f_lastwasblank: # 2nd consecutive blank if BLOCK.block()[-4:] == 'list': BLOCK.holdaddsub('') # helps parser while BLOCK.depth: # closes list (if any) ret.extend(BLOCK.blockout()) continue # ignore consecutive blanks # paragraph (if any) is wanted inside lists also if BLOCK.block()[-4:] == 'list': BLOCK.holdaddsub('') else: # html: show blank line (needs tag) if target in ['html','xhtml']: ret.append(TAGS['paragraphOpen']+\ TAGS['paragraphClose']) # otherwise we just show a blank line else: ret.append('') f_lastwasblank = 1 continue #---------------------[ special ]--------------------------- if regex['special'].search(line): # include command targ, key, val = ConfigLines().parse_line( line, 'include', target) if key: Debug("Found config '%s', value '%s'"%( key,val),1,linenr) incpath = os.path.dirname(CONF['sourcefile']) 
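# ---------------------------------------------------------------------------
# Illustrative sketch (not part of txt2tags): the %!include handling below
# splices the included lines into the body at the current position, so the
# main loop simply keeps walking into them; the _demo_* name is this
# sketch's own invention:
#
def _demo_splice(body, at, included):
    "Insert `included` before index `at` (the include line itself consumed)"
    return body[:at] + included + body[at:]

# _demo_splice(['a', 'c'], 1, ['b']) -> ['a', 'b', 'c']
# ---------------------------------------------------------------------------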
incfile = val err = _('A file cannot include itself (loop!)') if CONF['sourcefile'] == incfile: Error("%s: %s"%(err,incfile)) inctype, inclines = get_include_contents( incfile, incpath) # verb, raw and passthru are easy if inctype != 't2t': ret.extend(BLOCK.blockin(inctype)) BLOCK.holdextend(inclines) ret.extend(BLOCK.blockout()) else: # insert include lines into body #TODO del %!include command call #TODO include maxdepth limit bodylines = bodylines[:lineref] \ +inclines \ +bodylines[lineref:] continue else: Debug('Bogus Special Line',1,linenr) #---------------------[ comments ]-------------------------- # just skip them (if not macro) if regex['comment'].search(line) and not \ regex['macros'].match(line) and not \ regex['toc'].match(line): continue # valid line, reset blank status f_lastwasblank = 0 #---------------------[ Horizontal Bar ]-------------------- if regex['bar'].search(line): # a bar closes a paragraph if BLOCK.isblock('para'): ret.extend(BLOCK.blockout()) # we need to close all opened quote blocks # if bar isn't allowed inside or if not a quote line if BLOCK.isblock('quote'): if not rules['barinsidequote'] or \ not regex['quote'].search(line): while BLOCK.isblock('quote'): ret.extend(BLOCK.blockout()) # quote + bar: continue processing for quoting if rules['barinsidequote'] and \ regex['quote'].search(line): pass # just quote: save tagged line and we're done else: line = get_tagged_bar(line) if BLOCK.block()[-4:] == 'list': BLOCK.holdaddsub(line) elif BLOCK.block(): BLOCK.holdadd(line) else: ret.append(line) continue #---------------------[ Title ]----------------------------- #TODO set next blank and set f_lastwasblank or f_lasttitle if (regex['title'].search(line) or regex['numtitle'].search(line)) and \ BLOCK.block()[-4:] != 'list': # a title closes a paragraph if BLOCK.isblock('para'): ret.extend(BLOCK.blockout()) TITLE.add(line) ret.extend(TITLE.get()) f_lastwasblank = 1 continue #---------------------[ %%toc ]----------------------- # %%toc line closes paragraph if BLOCK.block() == 'para' and regex['toc'].search(line): ret.extend(BLOCK.blockout()) #---------------------[ apply masks ]----------------------- line = MASK.mask(line) #XXX from here, only block-inside lines will pass #---------------------[ Quote ]----------------------------- if regex['quote'].search(line): # store number of leading TABS quotedepth = len(regex['quote'].search(line).group(0)) # SGML doesn't support nested quotes if rules['quotenotnested']: quotedepth = 1 # new quote if not BLOCK.isblock('quote'): ret.extend(BLOCK.blockin('quote')) # new subquotes while BLOCK.depth < quotedepth: BLOCK.blockin('quote') # closing quotes while quotedepth < BLOCK.depth: ret.extend(BLOCK.blockout()) #---------------------[ Lists ]----------------------------- # an empty item also closes the current list if BLOCK.block()[-4:] == 'list': m = regex['listclose'].match(line) if m: listindent = m.group(1) listtype = m.group(2) currlisttype = BLOCK.prop('type') currlistindent = BLOCK.prop('indent') if listindent == currlistindent and \ listtype == currlisttype: ret.extend(BLOCK.blockout()) continue if regex['list'].search(line) or \ regex['numlist'].search(line) or \ regex['deflist'].search(line): listindent = BLOCK.prop('indent') listids = string.join(LISTNAMES.keys(), '') m = re.match('^( *)([%s]) '%listids, line) listitemindent = m.group(1) listtype = m.group(2) listname = LISTNAMES[listtype] results_box = BLOCK.holdadd # del list ID (and separate term from definition) if listname == 'deflist': term = 
parse_deflist_term(line) line = regex['deflist'].sub(term+SEPARATOR,line) else: line = regex[listname].sub(SEPARATOR,line) # don't cross depth limit maxdepth = rules['listmaxdepth'] if maxdepth and BLOCK.depth == maxdepth: if len(listitemindent) > len(listindent): listitemindent = listindent # open mother list or sublist if BLOCK.block()[-4:] != 'list' or \ len(listitemindent) > len(listindent): ret.extend(BLOCK.blockin(listname)) BLOCK.propset('indent',listitemindent) BLOCK.propset('type',listtype) # closing sublists while len(listitemindent) < len(BLOCK.prop('indent')): ret.extend(BLOCK.blockout()) # o-oh, sublist before list ("\n\n - foo\n- foo") # fix: close sublist (as mother), open another list if BLOCK.block()[-4:] != 'list': ret.extend(BLOCK.blockin(listname)) BLOCK.propset('indent',listitemindent) BLOCK.propset('type',listtype) #---------------------[ Table ]----------------------------- #TODO escape undesired format inside table #TODO add pm6 target if regex['table'].search(line): if not BLOCK.isblock('table'): # first table line! ret.extend(BLOCK.blockin('table')) BLOCK.tableparser.__init__(line) tablerow = TableMaster().parse_row(line) BLOCK.tableparser.add_row(tablerow) # save config # maintain line to unmask and inlines line = string.join(tablerow['cells'], SEPARATOR) #---------------------[ Paragraph ]------------------------- if not BLOCK.block() and \ not string.count(line, MASK.tocmask): # new para! ret.extend(BLOCK.blockin('para')) ############################################################ ############################################################ ############################################################ #---------------------[ Final Parses ]---------------------- # the target-specific special char escapes for body lines line = doEscape(target,line) line = add_inline_tags(line) line = MASK.undo(line) #---------------------[ Hold or Return? ]------------------- ### now we must choose here to put the parsed line # if not results_box: # list item extra lines if BLOCK.block()[-4:] == 'list': results_box = BLOCK.holdaddsub # other blocks elif BLOCK.block(): results_box = BLOCK.holdadd # no blocks else: line = doFinalEscape(target, line) results_box = ret.append results_box(line) # EOF: close any open para/verb/lists/table/quotes Debug('EOF',2) while BLOCK.block(): ret.extend(BLOCK.blockout()) # maybe a major tag to enclose body? 
(like DIV for CSS) if TAGS['bodyOpen' ]: ret.insert(0, TAGS['bodyOpen']) if TAGS['bodyClose']: ret.append(TAGS['bodyClose']) if CONF['toc-only']: ret = [] marked_toc = TITLE.dump_marked_toc(CONF['toc-level']) return ret, marked_toc ############################################################################## ################################### GUI ###################################### ############################################################################## # # tk help: http://python.org/topics/tkinter/ # tuto: http://ibiblio.org/obp/py4fun/gui/tkPhone.html # /usr/lib/python*/lib-tk/Tkinter.py # # grid table : row=0, column=0, columnspan=2, rowspan=2 # grid align : sticky='n,s,e,w' (North, South, East, West) # pack place : side='top,bottom,right,left' # pack fill : fill='x,y,both,none', expand=1 # pack align : anchor='n,s,e,w' (North, South, East, West) # padding : padx=10, pady=10, ipadx=10, ipady=10 (internal) # checkbox : offvalue is return if the _user_ deselected the box # label align: justify=left,right,center def load_GUI_resources(): "Load all extra modules and methods used by GUI" global askopenfilename, showinfo, showwarning, showerror, Tkinter from tkFileDialog import askopenfilename from tkMessageBox import showinfo,showwarning,showerror import Tkinter class Gui: "Graphical Tk Interface" def __init__(self, conf={}): self.root = Tkinter.Tk() # mother window, come to butthead self.root.title(my_name) # window title bar text self.window = self.root # variable "focus" for inclusion self.row = 0 # row count for grid() self.action_length = 150 # left column length (pixel) self.frame_margin = 10 # frame margin size (pixel) self.frame_border = 6 # frame border size (pixel) # the default Gui colors, can be changed by %!guicolors self.dft_gui_colors = ['blue','white','lightblue','black'] self.gui_colors = [] self.bg1 = self.fg1 = self.bg2 = self.fg2 = '' # on Tk, vars need to be set/get using setvar()/get() self.infile = self.setvar('') self.target = self.setvar('') self.target_name = self.setvar('') # the checks appearance order self.checks = [ 'headers','enum-title','toc','mask-email', 'toc-only','stdout'] # creating variables for all checks for check in self.checks: setattr(self, 'f_'+check, self.setvar('')) # load RC config self.conf = {} if conf: self.load_config(conf) def load_config(self, conf): self.conf = conf self.gui_colors = conf.get('guicolors') or self.dft_gui_colors self.bg1, self.fg1, self.bg2, self.fg2 = self.gui_colors self.root.config(bd=15,bg=self.bg1) ### config as dic for python 1.5 compat (**opts don't work :( ) def entry(self, **opts): return Tkinter.Entry(self.window, opts) def label(self, txt='', bg=None, **opts): opts.update({'text':txt,'bg':bg or self.bg1}) return Tkinter.Label(self.window, opts) def button(self,name,cmd,**opts): opts.update({'text':name,'command':cmd}) return Tkinter.Button(self.window, opts) def check(self,name,checked=0,**opts): bg, fg = self.bg2, self.fg2 opts.update({ 'text':name, 'onvalue':1, 'offvalue':0, 'activeforeground':fg, 'fg':fg, 'activebackground':bg, 'bg':bg, 'highlightbackground':bg, 'anchor':'w' }) chk = Tkinter.Checkbutton(self.window, opts) if checked: chk.select() chk.grid(columnspan=2, sticky='w', padx=0) def menu(self,sel,items): return apply(Tkinter.OptionMenu,(self.window,sel)+tuple(items)) # handy auxiliary functions def action(self, txt): self.label(txt, fg=self.fg1, bg=self.bg1, wraplength=self.action_length).grid(column=0,row=self.row) def frame_open(self): self.window = Tkinter.Frame(self.root,bg=self.bg2,
borderwidth=self.frame_border) def frame_close(self): self.window.grid(column=1, row=self.row, sticky='w', padx=self.frame_margin) self.window = self.root self.label('').grid() self.row = self.row + 2 # update row count def target_name2key(self): name = self.target_name.get() target = filter(lambda x: TARGET_NAMES[x] == name, TARGETS) try : key = target[0] except: key = '' self.target = self.setvar(key) def target_key2name(self): key = self.target.get() name = TARGET_NAMES.get(key) or key self.target_name = self.setvar(name) def exit(self): self.root.destroy() def setvar(self, val): z = Tkinter.StringVar() ; z.set(val) ; return z def askfile(self): ftypes= [(_('txt2tags files'),('*.t2t','*.txt')), (_('All files'),'*')] newfile = askopenfilename(filetypes=ftypes) if newfile: self.infile.set(newfile) newconf = process_source_file(newfile)[0] newconf = ConfigMaster().sanity(newconf, gui=1) # restate all checkboxes after file selection #TODO how to make a refresh without killing it? self.root.destroy() self.__init__(newconf) self.mainwindow() def scrollwindow(self, txt='no text!', title=''): # create components win = Tkinter.Toplevel() ; win.title(title) frame = Tkinter.Frame(win) scroll = Tkinter.Scrollbar(frame) text = Tkinter.Text(frame,yscrollcommand=scroll.set) button = Tkinter.Button(win) # config text.insert(Tkinter.END, string.join(txt,'\n')) scroll.config(command=text.yview) button.config(text=_('Close'), command=win.destroy) button.focus_set() # packing text.pack(side='left', fill='both', expand=1) scroll.pack(side='right', fill='y') frame.pack(fill='both', expand=1) button.pack(ipadx=30) def runprogram(self): global CMDLINE_RAW # prepare self.target_name2key() infile, target = self.infile.get(), self.target.get() # sanity if not target: showwarning(my_name,_("You must select a target type!")) return if not infile: showwarning(my_name, _("You must provide the source file location!")) return # compose cmdline guiflags = [] real_cmdline_conf = ConfigMaster(CMDLINE_RAW).parse() if real_cmdline_conf.has_key('infile'): del real_cmdline_conf['infile'] if real_cmdline_conf.has_key('target'): del real_cmdline_conf['target'] real_cmdline = CommandLine().compose_cmdline(real_cmdline_conf) default_outfile = ConfigMaster().get_outfile_name( {'sourcefile':infile, 'outfile':'', 'target':target}) for opt in self.checks: val = int(getattr(self, 'f_%s'%opt).get() or "0") if opt == 'stdout': opt = 'outfile' on_config = self.conf.get(opt) or 0 on_cmdline = real_cmdline_conf.get(opt) or 0 if opt == 'outfile': if on_config == STDOUT: on_config = 1 else: on_config = 0 if on_cmdline == STDOUT: on_cmdline = 1 else: on_cmdline = 0 if val != on_config or ( val == on_config == on_cmdline and real_cmdline_conf.has_key(opt)): if val: # was not set, but user selected on GUI Debug("user turned ON: %s"%opt) if opt == 'outfile': opt = '-o-' else: opt = '--%s'%opt else: # was set, but user deselected on GUI Debug("user turned OFF: %s"%opt) if opt == 'outfile': opt = "-o%s"%default_outfile else: opt = '--no-%s'%opt guiflags.append(opt) cmdline = [my_name, '-t', target] +real_cmdline \ +guiflags +[infile] Debug('Gui/Tk cmdline: %s'%cmdline,5) # run! 
cmdline_raw_orig = CMDLINE_RAW try: # fake the GUI cmdline as the real one, and parse file CMDLINE_RAW = CommandLine().get_raw_config(cmdline[1:]) data = process_source_file(infile) # on GUI, convert_* returns the data, not finish_him() outlist, config = convert_this_files([data]) # on GUI and STDOUT, finish_him() returns the data result = finish_him(outlist, config) # show outlist in a nice new window if result: outlist, config = result title = _('%s: %s converted to %s')%( my_name, os.path.basename(infile), string.upper(config['target'])) self.scrollwindow(outlist, title) # show the "file saved" message else: msg = "%s\n\n %s\n%s\n\n %s\n%s"%( _('Conversion done!'), _('FROM:'), infile, _('TO:'), config['outfile']) showinfo(my_name, msg) except ZeroDivisionError: # common error, not quit pass except: # fatal error ShowTraceback() print _('Sorry! txt2tags-Tk Fatal Error.') errmsg = '%s\n\n%s\n %s'%( _('Unknown error occurred.'), _('Please send the Error Traceback to the author:'), my_email) showerror(_('%s FATAL ERROR!')%my_name,errmsg) self.exit() CMDLINE_RAW = cmdline_raw_orig def mainwindow(self): self.infile.set(self.conf.get('sourcefile') or '') self.target.set(self.conf.get('target') or \ _('-- select one --')) outfile = self.conf.get('outfile') if outfile == STDOUT: # map -o- self.conf['stdout'] = 1 if self.conf.get('headers') == None: self.conf['headers'] = 1 # map default action1 = _("Enter the source file location:") action2 = _("Choose the target document type:") action3 = _("Some options you may check:") action4 = _("Some extra options:") checks_txt = { 'headers' : _("Include headers on output"), 'enum-title': _("Number titles (1, 1.1, 1.1.1, etc)"), 'toc' : _("Do TOC also (Table of Contents)"), 'mask-email': _("Hide e-mails from SPAM robots"), 'toc-only' : _("Just do TOC, nothing more"), 'stdout' : _("Dump to screen (Don't save target file)") } targets_menu = map(lambda x: TARGET_NAMES[x], TARGETS) # header self.label("%s %s"%(string.upper(my_name), my_version), bg=self.bg2, fg=self.fg2).grid(columnspan=2, ipadx=10) self.label(_("ONE source, MULTI targets")+'\n%s\n'%my_url, bg=self.bg1, fg=self.fg1).grid(columnspan=2) self.row = 2 # choose input file self.action(action1) ; self.frame_open() e_infile = self.entry(textvariable=self.infile,width=25) e_infile.grid(row=self.row, column=0, sticky='e') if not self.infile.get(): e_infile.focus_set() self.button(_("Browse"), self.askfile).grid( row=self.row, column=1, sticky='w', padx=10) # show outfile name, style and encoding (if any) txt = '' if outfile: txt = outfile if outfile == STDOUT: txt = _('<screen>') l_output = self.label(_('Output: ')+txt, fg=self.fg2,bg=self.bg2) l_output.grid(columnspan=2, sticky='w') for setting in ['style','encoding']: if self.conf.get(setting): name = string.capitalize(setting) val = self.conf[setting] self.label('%s: %s'%(name, val), fg=self.fg2, bg=self.bg2).grid( columnspan=2, sticky='w') # choose target self.frame_close() ; self.action(action2) self.frame_open() self.target_key2name() self.menu(self.target_name, targets_menu).grid( columnspan=2, sticky='w') # options checkboxes label self.frame_close() ; self.action(action3) self.frame_open() # compose options check boxes, example: # self.check(checks_txt['toc'],1,variable=self.f_toc) for check in self.checks: # extra options label if check == 'toc-only': self.frame_close() ; self.action(action4) self.frame_open() txt = checks_txt[check] var = getattr(self, 'f_'+check) checked = self.conf.get(check) self.check(txt,checked,variable=var)
self.frame_close() # spacer and buttons self.label('').grid() ; self.row = self.row + 1 b_quit = self.button(_("Quit"), self.exit) b_quit.grid(row=self.row, column=0, sticky='w', padx=30) b_conv = self.button(_("Convert!"), self.runprogram) b_conv.grid(row=self.row, column=1, sticky='e', padx=30) if self.target.get() and self.infile.get(): b_conv.focus_set() # as documentation told me if sys.platform[:3] == 'win': self.root.iconify() self.root.update() self.root.deiconify() self.root.mainloop() ############################################################################## ############################################################################## def exec_command_line(user_cmdline=[]): global CMDLINE_RAW, RC_RAW, DEBUG, VERBOSE, QUIET, GUI, Error # extract command line data cmdline_data = user_cmdline or sys.argv[1:] CMDLINE_RAW = CommandLine().get_raw_config(cmdline_data) cmdline_parsed = ConfigMaster(CMDLINE_RAW).parse() DEBUG = cmdline_parsed.get('debug' ) or 0 VERBOSE = cmdline_parsed.get('verbose') or 0 QUIET = cmdline_parsed.get('quiet' ) or 0 GUI = cmdline_parsed.get('gui' ) or 0 infiles = cmdline_parsed.get('infile' ) or [] Message(_("Txt2tags %s processing begins")%my_version,1) # the easy ones if cmdline_parsed.get('help' ): Quit(USAGE) if cmdline_parsed.get('version'): Quit(VERSIONSTR) # multifile haters if len(infiles) > 1: errmsg=_("Option --%s can't be used with multiple input files") for option in ['gui','dump-config']: if cmdline_parsed.get(option): Error(errmsg%option) Debug("system platform: %s"%sys.platform) Debug("line break char: %s"%repr(LB)) Debug("command line: %s"%sys.argv) Debug("command line raw config: %s"%CMDLINE_RAW,1) # extract RC file config if cmdline_parsed.get('rc') == 0: Message(_("Ignoring user configuration file"),1) else: rc_file = get_rc_path() if os.path.isfile(rc_file): Message(_("Loading user configuration file"),1) RC_RAW = ConfigLines(file=rc_file).get_raw_config() Debug("rc file: %s"%rc_file) Debug("rc file raw config: %s"%RC_RAW,1) # get all infiles config (if any) infiles_config = get_infiles_config(infiles) # is GUI available? # try to load and start GUI interface for --gui # if program was called with no arguments, try GUI also if GUI or not infiles: try: load_GUI_resources() Debug("GUI resources OK (Tk module is installed)") winbox = Gui() Debug("GUI display OK") GUI = 1 except: Debug("GUI Error: no Tk module or no DISPLAY") GUI = 0 # user forced --gui, but it's not available if cmdline_parsed.get('gui') and not GUI: ShowTraceback() Error("Sorry, I can't run my Graphical Interface - GUI\n" "- Check if Python Tcl/Tk module is installed (Tkinter)\n" "- Make sure you are in a graphical environment (like X)") # Okay, we will use GUI if GUI: Message(_("We are on GUI interface"),1) # redefine Error function to raise exception instead sys.exit() def Error(msg): showerror(_('txt2tags ERROR!'), msg) raise ZeroDivisionError # if no input file, get RC+cmdline config, else full config if not infiles: gui_conf = ConfigMaster(RC_RAW+CMDLINE_RAW).parse() else: try : gui_conf = infiles_config[0][0] except: gui_conf = {} # sanity is needed to set outfile and other things gui_conf = ConfigMaster().sanity(gui_conf, gui=1) Debug("GUI config: %s"%gui_conf,5) # insert config and populate the nice window! winbox.load_config(gui_conf) winbox.mainwindow() # console mode rocks forever! 
else: Message(_("We are on Command Line interface"),1) # called with no arguments, show error if not infiles: Error(_('Missing input file (try --help)')) convert_this_files(infiles_config) Message(_("Txt2tags finished sucessfuly"),1) sys.exit(0) if __name__ == '__main__': exec_command_line() # vim: ts=8
txt2tags/old
txt2tags-2.1.py
Python
gpl-2.0
141,312
"""Should be called last, if nothing can do a better job with the URL""" from gruntle.memebot.scanner import Scanner, ScanResult class DefaultScanner(Scanner): rss_templates = {None: 'memebot/scanner/rss/default.html'} def handle(self, response, log, browser): return ScanResult(response=response, override_url=None, title=None, content_type=None, content=None, attr=None) scanner = DefaultScanner()
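# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of how a more specific scanner could sit in front of this
# catch-all: it only claims HTML responses and returns a ScanResult carrying
# a title, leaving everything else to DefaultScanner, which the docstring
# says should run last. The `response.content_type` / `response.data`
# attributes and the "return None to decline" convention are assumptions
# here, not a documented memebot API.
import re

class HypotheticalTitleScanner(Scanner):

    rss_templates = {None: 'memebot/scanner/rss/default.html'}

    def handle(self, response, log, browser):
        if getattr(response, 'content_type', '').startswith('text/html'):
            match = re.search(r'<title>(.*?)</title>',
                              getattr(response, 'data', '') or '', re.I | re.S)
            if match:
                return ScanResult(response=response, override_url=None,
                                  title=match.group(1).strip(),
                                  content_type=None, content=None, attr=None)
        return None  # assumed to mean "no match, try the next scanner"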
icucinema/madcow
contrib/django-memebot/gruntle/memebot/scanner/default.py
Python
gpl-3.0
552
# # Script for exporting Blender models (meshes) to Colobot model files # (text format) # # Copyright (C) 2012-2014, TerranovaTeam # bl_info = { "name": "Colobot Model Format (.txt)", "author": "TerranovaTeam", "version": (0, 0, 2), "blender": (2, 6, 4), "location": "File > Export > Colobot (.txt)", "description": "Export Colobot Model Format (.txt)", "warning": "", "wiki_url": "http://colobot.info"\ "", "tracker_url": ""\ "", "category": "Import-Export"} import bpy import struct import array import os import copy import math ## # Data types & helper functions ## FUZZY_TOLERANCE = 1e-5 class ColobotError(Exception): """Exception in I/O operations""" def __init__(self, *args): self.value = ': '.join(map(str, args)) def __str__(self): return repr(self.value) def fuzzy_equal_v(v1, v2): for i in range(len(v1)): if abs(v1[i] - v2[i]) > FUZZY_TOLERANCE: return False return True class ColobotVertex: """Vertex as saved in Colobot model file""" def __init__(self): self.coord = array.array('f', [0.0, 0.0, 0.0]) self.normal = array.array('f', [0.0, 0.0, 0.0]) self.t1 = array.array('f', [0.0, 0.0]) self.t2 = array.array('f', [0.0, 0.0]) def __hash__(self): return 1 def __eq__(self, other): return (fuzzy_equal_v(self.coord, other.coord) and fuzzy_equal_v(self.normal, other.normal) and fuzzy_equal_v(self.t1, other.t1) and fuzzy_equal_v(self.t2, other.t2)) class ColobotMaterial: """Material as saved in Colobot model file""" def __init__(self): self.diffuse = array.array('f', [0.0, 0.0, 0.0, 0.0]) self.ambient = array.array('f', [0.0, 0.0, 0.0, 0.0]) self.specular = array.array('f', [0.0, 0.0, 0.0, 0.0]) self.tex1 = '' self.tex2 = '' self.var_tex2 = False self.state = 0 def __hash__(self): return 1 def __eq__(self, other): return (fuzzy_equal_v(self.diffuse, other.diffuse) and fuzzy_equal_v(self.ambient, other.ambient) and fuzzy_equal_v(self.specular, other.specular) and self.tex1 == other.tex1 and self.tex2 == other.tex2 and self.var_tex2 == other.var_tex2 and self.state == other.state) class ColobotTexPair: """Pair of 2 textures""" def __init__(self): self.tex1 = '' self.tex2 = '' def __hash__(self): return 1 def __eq__(self, other): return self.tex1 == other.tex1 and self.tex2 == other.tex2 class ColobotTriangle: """Triangle as saved in Colobot model file""" def __init__(self): self.p = [ColobotVertex(), ColobotVertex(), ColobotVertex()] self.mat = ColobotMaterial() self.lod_level = 0 class ColobotModel: """Colobot model (content of model file)""" def __init__(self): self.version = 1 self.triangles = [] def get_lod_level_list(self): lod_level_set = set() for t in self.triangles: lod_level_set.add(t.lod_level) return list(lod_level_set) def get_tex_pair_list(self): tex_pair_set = set() for t in self.triangles: tex_pair = ColobotTexPair() tex_pair.tex1 = t.mat.tex1 tex_pair.tex2 = t.mat.tex2 tex_pair_set.add(tex_pair) return list(tex_pair_set) def get_triangle_list(self, lod_level): triangles = [] for t in self.triangles: if (t.lod_level == lod_level): triangles.append(t) return triangles def get_vertex_list(self, lod_level): vertex_set = set() for t in self.triangles: if (t.lod_level == lod_level): for i in range(0, 3): vertex_set.add(t.p[i]) return list(vertex_set) def get_material_list(self, lod_level): material_set = set() for t in self.triangles: if (t.lod_level == lod_level): material_set.add(t.mat) return list(material_set) def v3to4(vec): return array.array('f', [vec[0], vec[1], vec[2], 0.0]) def v4to3(vec): return array.array('f', [vec[0], vec[1], vec[2]]) ## # Model file input/output ## def
write_colobot_model(filename, model): float_format = "{:g}".format file = open(filename, 'w') file.write('# Colobot text model\n') file.write('\n') file.write('### HEAD\n') file.write('version ' + str(model.version) + '\n') file.write('total_triangles ' + str(len(model.triangles)) + '\n') file.write('\n') file.write('### TRIANGLES\n') for t in model.triangles: for i in range(0, 3): p = t.p[i] file.write('p' + str(i+1)) file.write(' c ' + ' '.join(map(float_format, p.coord ))) file.write(' n ' + ' '.join(map(float_format, p.normal))) file.write(' t1 ' + ' '.join(map(float_format, p.t1))) file.write(' t2 ' + ' '.join(map(float_format, p.t2))) file.write('\n') file.write('mat') file.write(' dif ' + ' '.join(map(float_format, t.mat.diffuse))) file.write(' amb ' + ' '.join(map(float_format, t.mat.ambient))) file.write(' spc ' + ' '.join(map(float_format, t.mat.specular))) file.write('\n') file.write('tex1 ' + t.mat.tex1 + '\n') file.write('tex2 ' + t.mat.tex2 + '\n') file.write('var_tex2 ' + ('Y' if t.mat.var_tex2 else 'N') + '\n') file.write('lod_level ' + str(t.lod_level) + '\n') file.write('state ' + str(t.mat.state) + '\n') file.write('\n') file.close() def token_next_line(lines, index): while (index < len(lines)): line = lines[index] index = index + 1 if (not (len(line) == 0 or line[0] == '#' or line[0] == '\n') ): return ( line.split(), index) raise ColobotError('Unexpected EOF') def read_colobot_vertex(tokens): vertex = ColobotVertex() if (tokens[1] != 'c'): raise ColobotError('Invalid vertex') vertex.coord[0] = float(tokens[2]) vertex.coord[1] = float(tokens[3]) vertex.coord[2] = float(tokens[4]) if (tokens[5] != 'n'): raise ColobotError('Invalid vertex') vertex.normal[0] = float(tokens[6]) vertex.normal[1] = float(tokens[7]) vertex.normal[2] = float(tokens[8]) if (tokens[9] != 't1'): raise ColobotError('Invalid vertex') vertex.t1[0] = float(tokens[10]) vertex.t1[1] = float(tokens[11]) if (tokens[12] != 't2'): raise ColobotError('Invalid vertex') vertex.t2[0] = float(tokens[13]) vertex.t2[1] = float(tokens[14]) return vertex def read_colobot_material(tokens): material = ColobotMaterial() if (tokens[1] != 'dif'): raise ColobotError('Invalid material') material.diffuse[0] = float(tokens[2]) material.diffuse[1] = float(tokens[3]) material.diffuse[2] = float(tokens[4]) material.diffuse[3] = float(tokens[5]) if (tokens[6] != 'amb'): raise ColobotError('Invalid material') material.ambient[0] = float(tokens[7]) material.ambient[1] = float(tokens[8]) material.ambient[2] = float(tokens[9]) material.ambient[3] = float(tokens[10]) if (tokens[11] != 'spc'): raise ColobotError('Invalid material') material.specular[0] = float(tokens[12]) material.specular[1] = float(tokens[13]) material.specular[2] = float(tokens[14]) material.specular[3] = float(tokens[15]) return material def read_colobot_model(filename): model = ColobotModel() file = open(filename, 'r') lines = file.readlines() file.close() index = 0 numTriangles = 0 tokens, index = token_next_line(lines, index) if (tokens[0] != 'version'): raise ColobotError("Invalid header", "version") model.version = int(tokens[1]) if (model.version != 1): raise ColobotError("Unknown model file version") tokens, index = token_next_line(lines, index) if (tokens[0] != 'total_triangles'): raise ColobotError("Invalid header", "total_triangles") numTriangles = int(tokens[1]) for i in range(0, numTriangles): t = ColobotTriangle() tokens, index = token_next_line(lines, index) if (tokens[0] != 'p1'): raise ColobotError("Invalid triangle", "p1") t.p[0] =
read_colobot_vertex(tokens) tokens, index = token_next_line(lines, index) if (tokens[0] != 'p2'): raise ColobotError("Invalid triangle", "p2") t.p[1] = read_colobot_vertex(tokens) tokens, index = token_next_line(lines, index) if (tokens[0] != 'p3'): raise ColobotError("Invalid triangle", "p3") t.p[2] = read_colobot_vertex(tokens) tokens, index = token_next_line(lines, index) if (tokens[0] != 'mat'): raise ColobotError("Invalid triangle", "mat") t.mat = read_colobot_material(tokens) tokens, index = token_next_line(lines, index) if (tokens[0] != 'tex1'): raise ColobotError("Invalid triangle", "tex1") if (len(tokens) > 1): t.mat.tex1 = tokens[1] tokens, index = token_next_line(lines, index) if (tokens[0] != 'tex2'): raise ColobotError("Invalid triangle", "tex2") if (len(tokens) > 1): t.mat.tex2 = tokens[1] tokens, index = token_next_line(lines, index) if (tokens[0] != 'var_tex2'): raise ColobotError("Invalid triangle", "var_tex2") t.mat.var_tex2 = tokens[1] == 'Y' tokens, index = token_next_line(lines, index) if (tokens[0] != 'lod_level'): raise ColobotError("Invalid triangle", "lod_level") t.lod_level = int(tokens[1]) tokens, index = token_next_line(lines, index) if (tokens[0] != 'state'): raise ColobotError("Invalid triangle", "state") t.mat.state = int(tokens[1]) model.triangles.append(t) return model ## # Mesh conversion functions ## def append_obj_to_colobot_model(obj, model, scene, defaults): if (obj.type != 'MESH'): raise ColobotError('Only mesh objects can be exported') for poly in obj.data.polygons: if (poly.loop_total > 3): raise ColobotError('Cannot export polygons with > 3 vertices!') for i, poly in enumerate(obj.data.polygons): t = ColobotTriangle() j = 0 for loop_index in poly.loop_indices: v = obj.data.vertices[obj.data.loops[loop_index].vertex_index] t.p[j].coord = copy.copy(v.co) t.p[j].normal = copy.copy(v.normal) if (len(obj.data.uv_layers) >= 1): t.p[j].t1 = copy.copy(obj.data.uv_layers[0].data[loop_index].uv) t.p[j].t1[1] = 1.0 - t.p[j].t1[1] if (len(obj.data.uv_layers) >= 2): t.p[j].t2 = copy.copy(obj.data.uv_layers[1].data[loop_index].uv) t.p[j].t2[1] = 1.0 - t.p[j].t2[1] j = j + 1 mat = obj.data.materials[poly.material_index] t.mat.diffuse = v3to4(mat.diffuse_color) t.mat.diffuse[3] = mat.alpha t.mat.ambient = v3to4(scene.world.ambient_color * mat.ambient) t.mat.ambient[3] = mat.alpha t.mat.specular = v3to4(mat.specular_color) t.mat.specular[3] = mat.specular_alpha if (mat.texture_slots[0] != None): t.mat.tex1 = bpy.path.basename(mat.texture_slots[0].texture.image.filepath) if (mat.texture_slots[1] != None): t.mat.tex2 = bpy.path.basename(mat.texture_slots[1].texture.image.filepath) t.mat.var_tex2 = mat.get('var_tex2', defaults['var_tex2']) t.mat.state = mat.get('state', defaults['state']) t.lod_level = int(obj.data.get('lod_level', defaults['lod_level'])) model.triangles.append(t) def colobot_model_to_meshes(model, base_mesh_name, texture_dir): def load_tex(name): import os import sys from bpy_extras.image_utils import load_image if (name == ''): return None, None image = load_image(name, texture_dir, recursive=True, place_holder=True) texture = None if image: name = bpy.path.display_name_from_filepath(name) texture = bpy.data.textures.new(name=name, type='IMAGE') texture.image = image return image, texture class Texture: def __init__(self): self.image1 = None self.image2 = None self.tex1 = None self.tex2 = None tex_dict = dict() tex_pair_list = model.get_tex_pair_list() for tex_pair in tex_pair_list: tex_object = Texture() tex_object.image1, tex_object.tex1 = load_tex(tex_pair.tex1)
tex_object.image2, tex_object.tex2 = load_tex(tex_pair.tex2) tex_dict[tex_pair] = tex_object meshes = [] index = 0 lod_levels = model.get_lod_level_list() for lod_level in lod_levels: index = index + 1 mesh = bpy.data.meshes.new(name=base_mesh_name + str(index)) triangle_list = model.get_triangle_list(lod_level) vertex_list = model.get_vertex_list(lod_level) material_list = model.get_material_list(lod_level) uv1map = False uv2map = False zero_t = array.array('f', [0.0, 0.0]) for v in vertex_list: if ((not uv1map) and (v.t1 != zero_t)): uv1map = True if ((not uv2map) and (v.t2 != zero_t)): uv2map = True mesh.vertices.add(len(vertex_list)) for i, v in enumerate(mesh.vertices): v.co = copy.copy(vertex_list[i].coord) v.normal = copy.copy(vertex_list[i].normal) for i, m in enumerate(material_list): material = bpy.data.materials.new(name=base_mesh_name + str(index) + '_mat_' + str(i+1)) material.diffuse_color = v4to3(m.diffuse) material.ambient = (m.ambient[0] + m.ambient[1] + m.ambient[2]) / 3.0 material.alpha = (m.diffuse[3] + m.ambient[3]) / 2.0 material.specular_color = v4to3(m.specular) material.specular_alpha = m.specular[3] material.var_tex2 = m.var_tex2 material.state = m.state mesh.materials.append(material) mesh.tessfaces.add(len(triangle_list)) for i, f in enumerate(mesh.tessfaces): t = triangle_list[i] f.material_index = material_list.index(t.mat) for i in range(0, 3): f.vertices[i] = vertex_list.index(t.p[i]) if uv1map: uvlay1 = mesh.tessface_uv_textures.new(name='UV_1') for i, f in enumerate(uvlay1.data): f.uv1[0] = triangle_list[i].p[0].t1[0] f.uv1[1] = 1.0 - triangle_list[i].p[0].t1[1] f.uv2[0] = triangle_list[i].p[1].t1[0] f.uv2[1] = 1.0 - triangle_list[i].p[1].t1[1] f.uv3[0] = triangle_list[i].p[2].t1[0] f.uv3[1] = 1.0 - triangle_list[i].p[2].t1[1] if uv2map: uvlay2 = mesh.tessface_uv_textures.new(name='UV_2') for i, f in enumerate(uvlay2.data): f.uv1[0] = triangle_list[i].p[0].t2[0] f.uv1[1] = 1.0 - triangle_list[i].p[0].t2[1] f.uv2[0] = triangle_list[i].p[1].t2[0] f.uv2[1] = 1.0 - triangle_list[i].p[1].t2[1] f.uv3[0] = triangle_list[i].p[2].t2[0] f.uv3[1] = 1.0 - triangle_list[i].p[2].t2[1] for i, m in enumerate(material_list): tex_pair = ColobotTexPair() tex_pair.tex1 = m.tex1 tex_pair.tex2 = m.tex2 tex_object = tex_dict[tex_pair] if tex_object and tex_object.image1: mtex = mesh.materials[i].texture_slots.add() mtex.texture = tex_object.tex1 mtex.texture_coords = 'UV' mtex.uv_layer = 'UV_1' mtex.use_map_color_diffuse = True for j, face in enumerate(mesh.uv_textures[0].data): if (triangle_list[j].mat.tex1 == m.tex1): face.image = tex_object.image1 if tex_object and tex_object.image2: mtex = mesh.materials[i].texture_slots.add() mtex.texture = tex_object.tex2 mtex.texture_coords = 'UV' mtex.uv_layer = 'UV_2' mtex.use_map_color_diffuse = True for j, face in enumerate(mesh.uv_textures[1].data): if (triangle_list[j].mat.tex2 == m.tex2): face.image = tex_object.image2 mesh.lod_level = str(lod_level) mesh.validate() mesh.update() meshes.append(mesh) return meshes ## # Export UI dialog & operator ## EXPORT_FILEPATH = '' class ExportColobotDialog(bpy.types.Operator): bl_idname = 'object.export_colobot_dialog' bl_label = "Dialog for Colobot export" mode = bpy.props.EnumProperty( name="Mode", items = [('overwrite', "Overwrite", "Overwrite existing model triangles"), ('append', "Append", "Append triangles to existing model")], default='overwrite') default_lod_level = bpy.props.EnumProperty( name="Default LOD level", items = [('0', "Constant", "Constant (always visible)"), ('1', "Low", "Low
(visible at furthest distance)"), ('2', "Medium", "Medium (visible at medium distance)"), ('3', "High", "High (visible at closest distance)")], default='0') default_var_tex2 = bpy.props.BoolProperty(name="Default variable 2nd texture", default=False) default_state = bpy.props.IntProperty(name="Default state", default=0) def execute(self, context): global EXPORT_FILEPATH try: defaults = { 'lod_level': self.default_lod_level, 'var_tex2': self.default_var_tex2, 'state': self.default_state } model = ColobotModel() if (self.mode == 'append'): model = read_colobot_model(EXPORT_FILEPATH) for obj in context.selected_objects: rot = obj.rotation_euler rot[0] = rot[0] + math.radians(270) obj.rotation_euler = rot append_obj_to_colobot_model(obj, model, context.scene, defaults) rot = obj.rotation_euler rot[0] = rot[0] + math.radians(90) obj.rotation_euler = rot write_colobot_model(EXPORT_FILEPATH, model) except ColobotError as e: self.report({'ERROR'}, ': '.join(map(str, e.args))) return {'FINISHED'} self.report({'INFO'}, 'Export OK') return {'FINISHED'} def invoke(self, context, event): context.window_manager.invoke_props_dialog(self, width=500) return {'RUNNING_MODAL'} class ExportColobot(bpy.types.Operator): """Exporter to Colobot text format""" bl_idname = "export.colobot" bl_label = "Export to Colobot" filepath = bpy.props.StringProperty(subtype="FILE_PATH") @classmethod def poll(cls, context): return context.object is not None def execute(self, context): global EXPORT_FILEPATH EXPORT_FILEPATH = self.filepath bpy.ops.object.export_colobot_dialog('INVOKE_DEFAULT') return {'FINISHED'} def invoke(self, context, event): context.window_manager.fileselect_add(self) return {'RUNNING_MODAL'} ## # Import UI dialog & operator # IMPORT_FILEPATH = '' class ImportColobotDialog(bpy.types.Operator): bl_idname = 'object.import_colobot_dialog' bl_label = "Dialog for Colobot import" lod_separate_layers = bpy.props.BoolProperty(name="LOD levels to separate layers", default=True) texture_dir = bpy.props.StringProperty(name="Texture directory", subtype="DIR_PATH") def execute(self, context): global IMPORT_FILEPATH try: texture_dir = self.texture_dir if (texture_dir == ""): texture_dir = os.path.dirname(IMPORT_FILEPATH) model = read_colobot_model(IMPORT_FILEPATH) meshes = colobot_model_to_meshes(model, 'ColobotMesh_', texture_dir) index = 0 for mesh in meshes: index = index + 1 obj = bpy.data.objects.new('ColobotMesh_' + str(index), mesh) rot = obj.rotation_euler rot[0] = rot[0] + math.radians(90) obj.rotation_euler = rot bpy.context.scene.objects.link(obj) bpy.context.scene.objects.active = obj obj.select = True # TODO: doesn't seem to work...
if (self.lod_separate_layers): layers = obj.layers for i in range(0, len(layers)): layers[i] = int(mesh.lod_level) == i obj.layers = layers except ColobotError as e: self.report({'ERROR'}, ': '.join(map(str, e.args))) return {'FINISHED'} self.report({'INFO'}, 'Import OK') return {'FINISHED'} def invoke(self, context, event): context.window_manager.invoke_props_dialog(self, width=500) return {'RUNNING_MODAL'} class ImportColobot(bpy.types.Operator): """Importer from Colobot text format""" bl_idname = "import.colobot" bl_label = "Import from Colobot" filepath = bpy.props.StringProperty(subtype="FILE_PATH") @classmethod def poll(cls, context): return True def execute(self, context): global IMPORT_FILEPATH IMPORT_FILEPATH = self.filepath bpy.ops.object.import_colobot_dialog('INVOKE_DEFAULT') return {'FINISHED'} def invoke(self, context, event): context.window_manager.fileselect_add(self) return {'RUNNING_MODAL'} ## # Registration ## # Callback functions for menu items def export_menu_func(self, context): self.layout.operator_context = 'INVOKE_DEFAULT' self.layout.operator(ExportColobot.bl_idname, text="Colobot (Text Format)") def import_menu_func(self, context): self.layout.operator_context = 'INVOKE_DEFAULT' self.layout.operator(ImportColobot.bl_idname, text="Colobot (Text Format)") # Custom properties for materials def register_material_props(): bpy.types.Mesh.lod_level = bpy.props.EnumProperty(name="LOD level", items = [('0', "Constant", "Constant (always visible)"), ('1', "Low", "Low (visible at furthest distance)"), ('2', "Medium", "Medium (visible at medium distance)"), ('3', "High", "High (visible at closest distance)")]) bpy.types.Material.var_tex2 = bpy.props.BoolProperty(name="Variable 2nd texture", description="2nd texture shall be set to dirtyXX.png") bpy.types.Material.state = bpy.props.IntProperty(name="State", description="Engine render state") # Add-on registration def register(): bpy.utils.register_module(__name__) register_material_props() bpy.types.INFO_MT_file_export.append(export_menu_func) bpy.types.INFO_MT_file_import.append(import_menu_func)
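# --- Illustrative addition (not part of the original script) ---
# For reference, this is the shape of the text format that
# write_colobot_model() above emits and read_colobot_model() parses back.
# The layout follows the file.write() calls exactly; the numbers and
# texture names below are made up:
#
#   # Colobot text model
#
#   ### HEAD
#   version 1
#   total_triangles 1
#
#   ### TRIANGLES
#   p1 c 0 0 0 n 0 0 1 t1 0 0 t2 0 0
#   p2 c 1 0 0 n 0 0 1 t1 1 0 t2 0 0
#   p3 c 0 1 0 n 0 0 1 t1 0 1 t2 0 0
#   mat dif 1 1 1 1 amb 0.5 0.5 0.5 1 spc 0 0 0 0
#   tex1 base.png
#   tex2 dirty01.png
#   var_tex2 N
#   lod_level 0
#   state 0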
ManuelBlanc/colobot
tools/blender-scripts.py
Python
gpl-3.0
23,570
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models from django.db.models import F from taggit.models import TaggedItem class Migration(DataMigration): def forwards(self, orm): """ At some point we created some tags with identical names. Time to clean that up. """ # find subset of tags with duplicate names tags = orm.DocumentTag.objects.all() tag_names = [] dupe_tag_names = [] for tag in tags: if tag.name in tag_names: dupe_tag_names.append(tag.name) tag_names.append(tag.name) # loop thru the dupes for dupe_name in dupe_tag_names: doc_tags = orm.DocumentTag.objects.filter(name=dupe_name) # keep the first canonical_doc_tag = doc_tags[0] # re-assign the other tagged items to the canonical tag for dupe_tag in doc_tags[1:]: tagged_items = TaggedItem.objects.filter(tag=dupe_tag) tagged_items.update(tag=canonical_doc_tag) # delete the dupe tag dupe_tag.delete() def backwards(self, orm): raise RuntimeError("Cannot reverse this migration.") models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'tidings.watch': { 'Meta': {'object_name': 'Watch'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}), 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'attachments.attachment': { 'Meta': {'object_name': 'Attachment'}, 'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_rev'", 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mindtouch_attachment_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, 'attachments.attachmentrevision': { 'Meta': {'object_name': 'AttachmentRevision'}, 'attachment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['attachments.Attachment']"}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_attachment_revisions'", 'to': "orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '500'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}) }, 'wiki.document': { 'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'}, 'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'current_revision': ('django.db.models.fields.related.ForeignKey', [], 
{'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}), 'defer_rendering': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'html': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'last_rendered_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}), 'mindtouch_page_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}), 'parent_topic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.Document']"}), 'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}), 'render_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'render_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'rendered_errors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'rendered_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, 'wiki.documenttag': { 'Meta': {'object_name': 'DocumentTag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}) }, 'wiki.editortoolbar': { 'Meta': {'object_name': 'EditorToolbar'}, 'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}), 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'wiki.firefoxversion': { 'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'FirefoxVersion'}, 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'firefox_version_set'", 'to': "orm['wiki.Document']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'item_id': ('django.db.models.fields.IntegerField', [], {}) }, 'wiki.helpfulvote': { 'Meta': {'object_name': 'HelpfulVote'}, 'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 
'db_index': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}), 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}), 'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'}) }, 'wiki.operatingsystem': { 'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'OperatingSystem'}, 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'operating_system_set'", 'to': "orm['wiki.Document']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'item_id': ('django.db.models.fields.IntegerField', [], {}) }, 'wiki.relateddocument': { 'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'}, 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_common': ('django.db.models.fields.IntegerField', [], {}), 'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"}) }, 'wiki.reviewtag': { 'Meta': {'object_name': 'ReviewTag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}) }, 'wiki.reviewtaggedrevision': { 'Meta': {'object_name': 'ReviewTaggedRevision'}, 'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"}) }, 'wiki.revision': { 'Meta': {'object_name': 'Revision'}, 'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'content': ('django.db.models.fields.TextField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}), 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}), 'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}), 'show_toc': ('django.db.models.fields.BooleanField', [], 
{'default': 'True'}), 'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}), 'summary': ('django.db.models.fields.TextField', [], {}), 'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}) }, 'wiki.taggeddocument': { 'Meta': {'object_name': 'TaggedDocument'}, 'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.DocumentTag']"}) } } complete_apps = ['wiki']
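# --- Illustrative addition (not part of the original migration) ---
# The duplicate detection in forwards() above scans the tag list twice by
# hand; the same grouping can be sketched with collections.Counter, which
# also lists each duplicated name exactly once even when a name occurs
# three or more times. A standalone sketch with made-up tag names:
from collections import Counter

def find_duplicate_names(names):
    """Return each tag name that occurs more than once, listed once."""
    return [name for name, count in Counter(names).items() if count > 1]

assert find_duplicate_names(['css', 'html', 'css', 'js', 'css']) == ['css']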
scrollback/kuma
kuma/wiki/migrations/0018_clean_documenttags.py
Python
mpl-2.0
17,300
import os import re import sys import unittest import unittest.mock from pkg_resources import VersionConflict from coalib.coala_main import run_coala from coalib.output.printers.LogPrinter import LogPrinter from coalib import assert_supported_version, coala from pyprint.ConsolePrinter import ConsolePrinter from coala_utils.ContextManagers import prepare_file from coalib.output.Logging import configure_logging from tests.TestUtilities import execute_coala, bear_test_module class coalaTest(unittest.TestCase): def setUp(self): self.old_argv = sys.argv def tearDown(self): sys.argv = self.old_argv def test_coala(self): with bear_test_module(), \ prepare_file(['#fixme'], None) as (lines, filename): retval, stdout, stderr = execute_coala( coala.main, 'coala', '-c', os.devnull, '-f', re.escape(filename), '-b', 'LineCountTestBear') self.assertIn('This file has 1 lines.', stdout, 'The output should report count as 1 lines') self.assertIn('During execution of coala', stderr) self.assertNotEqual(retval, 0, 'coala must return nonzero when errors occurred') @unittest.mock.patch('sys.version_info', tuple((2, 7, 11))) def test_python_version_27(self): with self.assertRaises(SystemExit) as cm: assert_supported_version() self.assertEqual(cm.exception.code, 4) @unittest.mock.patch('sys.version_info', tuple((3, 3, 6))) def test_python_version_33(self): with self.assertRaises(SystemExit) as cm: assert_supported_version() self.assertEqual(cm.exception.code, 4) def test_python_version_34(self): assert_supported_version() def test_did_nothing(self): retval, stdout, stderr = execute_coala(coala.main, 'coala', '-I', '-S', 'cli.enabled=false') self.assertEqual(retval, 2) self.assertIn('Did you forget to give the `--files`', stderr) self.assertFalse(stdout) retval, stdout, stderr = execute_coala(coala.main, 'coala', '-I', '-b', 'JavaTestBear', '-f', '*.java', '-S', 'cli.enabled=false') self.assertEqual(retval, 2) self.assertIn('Nothing to do.', stderr) self.assertFalse(stdout) def test_show_all_bears(self): with bear_test_module(): retval, stdout, stderr = execute_coala( coala.main, 'coala', '-B', '-I') self.assertEqual(retval, 0) # 7 bears plus 1 line holding the closing colour escape sequence. self.assertEqual(len(stdout.strip().splitlines()), 8) self.assertFalse(stderr) def test_show_language_bears(self): with bear_test_module(): retval, stdout, stderr = execute_coala( coala.main, 'coala', '-B', '-l', 'java', '-I') self.assertEqual(retval, 0) # 2 bears plus 1 line holding the closing colour escape sequence.
self.assertEqual(len(stdout.splitlines()), 3) self.assertFalse(stderr) def test_show_capabilities_with_supported_language(self): with bear_test_module(): retval, stdout, stderr = execute_coala( coala.main, 'coala', '-p', 'R', '-I') self.assertEqual(retval, 0) self.assertEqual(len(stdout.splitlines()), 2) self.assertFalse(stderr) @unittest.mock.patch('coalib.parsing.DefaultArgParser.get_all_bears_names') @unittest.mock.patch('coalib.collecting.Collectors.icollect_bears') def test_version_conflict_in_collecting_bears(self, import_fn, _): with bear_test_module(): import_fn.side_effect = VersionConflict('msg1', 'msg2') retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B') self.assertEqual(retval, 13) self.assertIn(('There is a conflict in the version of a ' 'dependency you have installed'), stderr) self.assertIn('pip install "msg2"', stderr) self.assertFalse(stdout) self.assertNotEqual(retval, 0, 'coala must return nonzero when errors occurred') @unittest.mock.patch('coalib.collecting.Collectors._import_bears') def test_unimportable_bear(self, import_fn): with bear_test_module(): import_fn.side_effect = SyntaxError retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B') self.assertEqual(retval, 0) self.assertIn('Unable to collect bears from', stderr) self.assertIn('No bears to show.', stdout) import_fn.side_effect = VersionConflict('msg1', 'msg2') retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B') # Note that bear version conflicts don't give exitcode=13, # they just give a warning with traceback in log_level debug. self.assertEqual(retval, 0) self.assertRegex(stderr, 'Unable to collect bears from .* because there ' 'is a conflict with the version of a dependency ' 'you have installed') self.assertIn('pip install "msg2"', stderr) self.assertIn('No bears to show.', stdout) def test_run_coala_no_autoapply(self): with bear_test_module(), \ prepare_file(['#fixme '], None) as (lines, filename): self.assertEqual( 1, len(run_coala( console_printer=ConsolePrinter(), log_printer=LogPrinter(), arg_list=( '-c', os.devnull, '-f', re.escape(filename), '-b', 'SpaceConsistencyTestBear', '--apply-patches', '-S', 'use_spaces=yeah' ), autoapply=False )[0]['cli']) ) self.assertEqual( 0, len(run_coala( console_printer=ConsolePrinter(), log_printer=LogPrinter(), arg_list=( '-c', os.devnull, '-f', re.escape(filename), '-b', 'SpaceConsistencyTestBear', '--apply-patches', '-S', 'use_spaces=yeah' ) )[0]['cli']) ) def test_logged_error_causes_non_zero_exitcode(self): configure_logging() with bear_test_module(), \ prepare_file(['#fixme '], None) as (lines, filename): _, exitcode, _ = run_coala( console_printer=ConsolePrinter(), log_printer=LogPrinter(), arg_list=( '-c', os.devnull, '-f', re.escape(filename), '-b', 'ErrorTestBear' ), autoapply=False ) assert exitcode == 1
refeed/coala
tests/coalaTest.py
Python
agpl-3.0
7,484
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re class CommandFilter(object): """Command filter only checking that the 1st argument matches exec_path""" def __init__(self, exec_path, run_as, *args): self.exec_path = exec_path self.run_as = run_as self.args = args def match(self, userargs): """Only check that the first argument (command) matches exec_path""" if (os.path.basename(self.exec_path) == userargs[0]): return True return False def get_command(self, userargs): """Returns command to execute (with sudo -u if run_as != root).""" if (self.run_as != 'root'): # Used to run commands at lesser privileges return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:] return [self.exec_path] + userargs[1:] def get_environment(self, userargs): """Returns specific environment to set, None if none""" return None class RegExpFilter(CommandFilter): """Command filter doing regexp matching for every argument""" def match(self, userargs): # Early skip if command or number of args don't match if (len(self.args) != len(userargs)): # DENY: argument numbers don't match return False # Compare each arg (anchoring pattern explicitly at end of string) for (pattern, arg) in zip(self.args, userargs): try: if not re.match(pattern + '$', arg): break except re.error: # DENY: Badly-formed filter return False else: # ALLOW: All arguments matched return True # DENY: Some arguments did not match return False class DnsmasqFilter(CommandFilter): """Specific filter for the dnsmasq call (which includes env)""" def match(self, userargs): if (userargs[0].startswith("FLAGFILE=") and userargs[1].startswith("NETWORK_ID=") and userargs[2] == "dnsmasq"): return True return False def get_command(self, userargs): return [self.exec_path] + userargs[3:] def get_environment(self, userargs): env = os.environ.copy() env['FLAGFILE'] = userargs[0].split('=')[-1] env['NETWORK_ID'] = userargs[1].split('=')[-1] return env class KillFilter(CommandFilter): """Specific filter for the kill calls. 1st argument is a list of accepted signals (emptystring means no signal) 2nd argument is a list of accepted affected executables. This filter relies on /proc to accurately determine affected executable, so it will only work on procfs-capable systems (not OSX). 
""" def match(self, userargs): if userargs[0] != "kill": return False args = list(userargs) if len(args) == 3: signal = args.pop(1) if signal not in self.args[0]: # Requested signal not in accepted list return False else: if len(args) != 2: # Incorrect number of arguments return False if '' not in self.args[0]: # No signal, but list doesn't include empty string return False try: command = os.readlink("/proc/%d/exe" % int(args[1])) # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on # the end if an executable is updated or deleted if command.endswith(" (deleted)"): command = command[:command.rindex(" ")] if command not in self.args[1]: # Affected executable not in accepted list return False except (ValueError, OSError): # Incorrect PID return False return True class ReadFileFilter(CommandFilter): """Specific filter for the utils.read_file_as_root call""" def __init__(self, file_path, *args): self.file_path = file_path super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) def match(self, userargs): if userargs[0] != 'cat': return False if userargs[1] != self.file_path: return False if len(userargs) != 2: return False return True
usc-isi/extra-specs
nova/rootwrap/filters.py
Python
apache-2.0
5,011