repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
teriyakichild/ansible-modules-extras
cloud/vmware/vmware_dvs_portgroup.py
4
6557
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen <jcallen () csc.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: vmware_dvs_portgroup short_description: Create or remove a Distributed vSwitch portgroup description: - Create or remove a Distributed vSwitch portgroup version_added: 2.0 author: "Joseph Callen (@jcpowermac)" notes: - Tested on vSphere 5.5 requirements: - "python >= 2.6" - PyVmomi options: portgroup_name: description: - The name of the portgroup that is to be created or deleted required: True switch_name: description: - The name of the distributed vSwitch the port group should be created on. 
required: True vlan_id: description: - The VLAN ID that should be configured with the portgroup required: True num_ports: description: - The number of ports the portgroup should contain required: True portgroup_type: description: - See VMware KB 1022312 regarding portgroup types required: True choices: - 'earlyBinding' - 'lateBinding' - 'ephemeral' extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Create Management portgroup local_action: module: vmware_dvs_portgroup hostname: vcenter_ip_or_hostname username: vcenter_username password: vcenter_password portgroup_name: Management switch_name: dvSwitch vlan_id: 123 num_ports: 120 portgroup_type: earlyBinding state: present ''' try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type): config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() config.name = portgroup_name config.numPorts = num_ports # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation # but this is the correct managed object type. 
config.defaultPortConfig = vim.VMwareDVSPortSetting() # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the # pyvmomi documentation but this is the correct managed object type config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec() config.defaultPortConfig.vlan.inherited = False config.defaultPortConfig.vlan.vlanId = vlan_id config.type = portgroup_type spec = [config] task = dv_switch.AddDVPortgroup_Task(spec) changed, result = wait_for_task(task) return changed, result def state_destroy_dvspg(module): dvs_portgroup = module.params['dvs_portgroup'] changed = True result = None if not module.check_mode: task = dvs_portgroup.Destroy_Task() changed, result = wait_for_task(task) module.exit_json(changed=changed, result=str(result)) def state_exit_unchanged(module): module.exit_json(changed=False) def state_update_dvspg(module): module.exit_json(changed=False, msg="Currently not implemented.") return def state_create_dvspg(module): switch_name = module.params['switch_name'] portgroup_name = module.params['portgroup_name'] dv_switch = module.params['dv_switch'] vlan_id = module.params['vlan_id'] num_ports = module.params['num_ports'] portgroup_type = module.params['portgroup_type'] changed = True result = None if not module.check_mode: changed, result = create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type) module.exit_json(changed=changed, result=str(result)) def check_dvspg_state(module): switch_name = module.params['switch_name'] portgroup_name = module.params['portgroup_name'] content = connect_to_api(module) module.params['content'] = content dv_switch = find_dvs_by_name(content, switch_name) if dv_switch is None: raise Exception("A distributed virtual switch with name %s does not exist" % switch_name) module.params['dv_switch'] = dv_switch dvs_portgroup = find_dvspg_by_name(dv_switch, portgroup_name) if dvs_portgroup is None: return 'absent' else: module.params['dvs_portgroup'] = dvs_portgroup 
return 'present' def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), switch_name=dict(required=True, type='str'), vlan_id=dict(required=True, type='int'), num_ports=dict(required=True, type='int'), portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'), state=dict(default='present', choices=['present', 'absent'], type='str'))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') try: dvspg_states = { 'absent': { 'present': state_destroy_dvspg, 'absent': state_exit_unchanged, }, 'present': { 'update': state_update_dvspg, 'present': state_exit_unchanged, 'absent': state_create_dvspg, } } dvspg_states[module.params['state']][check_dvspg_state(module)](module) except vmodl.RuntimeFault as runtime_fault: module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: module.fail_json(msg=method_fault.msg) except Exception as e: module.fail_json(msg=str(e)) from ansible.module_utils.vmware import * from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
hantek/pylearn2
pylearn2/packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py
44
23194
""" WRITEME """ from __future__ import print_function import inspect import os import StringIO import theano from theano.sandbox.cuda import CudaNdarrayType from theano.gof import local_optimizer from theano.sandbox.cuda.opt import register_opt from theano.sandbox.cuda import gpu_from_host, host_from_gpu from theano.sandbox.cuda.basic_ops import gpu_contiguous from .unshared_conv import FilterActs from .unshared_conv import WeightActs from .unshared_conv import ImgActs _this_dir = os.path.dirname(inspect.getfile(inspect.currentframe())) # XXX: move to cuda.opt and refactor there def any_from_gpu(*vv): """ .. todo:: WRITEME """ for v in vv: if v.owner and v.owner.op == host_from_gpu: return True return False # XXX: move to cuda.opt and refactor there def any_gpu_client(*vv): """ .. todo:: WRITEME """ for v in vv: for (cl, pos) in v.clients: if cl.op == gpu_from_host: return True return False class Base(theano.Op): """ .. todo:: WRITEME Parameters ---------- module_stride : WRITEME partial_sum : WRITEME """ def __init__(self, module_stride, partial_sum): self.module_stride = module_stride self.partial_sum = partial_sum def _attributes(self): """ .. todo:: WRITEME """ return ( self.module_stride, self.partial_sum, ) def __eq__(self, other): """ .. todo:: WRITEME """ return (type(self) == type(other) and self._attributes() == other._attributes()) def __hash__(self): """ .. todo:: WRITEME """ return hash((type(self), self._attributes())) def __str__(self): """ .. todo:: WRITEME """ return '%s{module_stride=%i,partial_sum=%i}' % ( self.__class__.__name__, self.module_stride, self.partial_sum, ) class GpuFilterActs(Base): """ .. todo:: WRITEME """ def make_node(self, images, filters): """ .. 
todo:: WRITEME """ ibcast = images.broadcastable fbcast = filters.broadcastable igroups, icolors_per_group, irows, icols, icount = ibcast fmodulesR, fmodulesC, fcolors, frows, fcols = fbcast[:-2] fgroups, filters_per_group = fbcast[-2:] hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount) if not isinstance(images.type, CudaNdarrayType): raise TypeError('gpu_filter_acts requires CudaNdarray images', images) if not isinstance(filters.type, CudaNdarrayType): raise TypeError('gpu_filter_acts requires CudaNdarray filters', filters) htype = CudaNdarrayType(broadcastable=hbcast) return theano.gof.Apply(self, [images, filters], [htype()]) def c_support_code(self): """ .. todo:: WRITEME """ cufile = open(os.path.join(_this_dir, 'filter_acts.cu')) return cufile.read() def c_code_cache_version(self): """ .. todo:: WRITEME """ return () def c_code(self, node, nodename, inputs, outputs, sub): """ .. todo:: WRITEME """ #z_out = alpha * dot(x,y) + beta * z_in #inplace version, set set z_out = z_in #not inplace version, we copy z_in to z_out. 
images, filters, = inputs responses, = outputs fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print(""" //XXX: actually the rightmost images dimension can be strided if (!CudaNdarray_is_c_contiguous(%(images)s)) { PyErr_Format(PyExc_NotImplementedError, "images not c contiguous"); %(fail)s; } if (!CudaNdarray_is_c_contiguous(%(filters)s)) { PyErr_Format(PyExc_NotImplementedError, "filters not c contiguous"); %(fail)s; } if (%(images)s->nd != 5) { PyErr_Format(PyExc_TypeError, "images ndim (%%i) must be 5", %(images)s->nd); %(fail)s; } if (%(filters)s->nd != 7) { PyErr_Format(PyExc_TypeError, "filters ndim (%%i) must be 7", %(filters)s->nd); %(fail)s; } //fprintf(stderr, "really running on GPU\\n"); { // new scope, new vars int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0]; int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1]; int irows = CudaNdarray_HOST_DIMS(%(images)s)[2]; int icols = CudaNdarray_HOST_DIMS(%(images)s)[3]; int icount = CudaNdarray_HOST_DIMS(%(images)s)[4]; int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0]; int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1]; int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2]; int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3]; int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4]; int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5]; int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6]; // XXX: use this parameter properly int paddingStart = 0; int imgStride = icount; float scaleTargets = 0.0; float scaleOutput = 1.0; bool conv = false; if (igroups != fgroups) { PyErr_Format(PyExc_ValueError, "igroups != fgroups (%%i != %%i)", igroups, fgroups); %(fail)s; } if (icolors_per_group != fcolors) { PyErr_Format(PyExc_ValueError, "icolors_per_group != fcolors (%%i != %%i)", icolors_per_group, fcolors); %(fail)s; } if (!%(responses)s) { Py_XDECREF(%(responses)s); int dims[5]; dims[0] = fgroups; dims[1] = filters_per_group; dims[2] = fmodulesR; dims[3] = fmodulesC; 
dims[4] = icount; %(responses)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims); if (!%(responses)s) { %(fail)s; } } assert(CudaNdarray_is_c_contiguous(%(responses)s)); if (_filterActs( igroups, icolors_per_group, irows, icols, icount, fmodulesR, fmodulesC, frows, fcols, filters_per_group, CudaNdarray_DEV_DATA(%(images)s), CudaNdarray_DEV_DATA(%(filters)s), CudaNdarray_DEV_DATA(%(responses)s), paddingStart, %(moduleStride)s, imgStride, scaleTargets, scaleOutput, conv)) { %(fail)s; } } // end bogus scope used for vars """, file=sio) return sio.getvalue() % locals() @register_opt() @local_optimizer([FilterActs]) def insert_gpu_filter_acts(node): """ .. todo:: WRITEME """ if isinstance(node.op, FilterActs): images, filters = node.inputs if any_from_gpu(images, filters) or any_gpu_client(*node.outputs): gpu_filter_acts = GpuFilterActs( module_stride=node.op.module_stride, partial_sum=1) return [host_from_gpu(gpu_filter_acts( gpu_from_host(images), gpu_from_host(filters)))] class GpuWeightActs(Base): """ .. todo:: WRITEME """ def make_node(self, images, hidacts, frows, fcols): """ .. todo:: WRITEME """ if self.partial_sum != 1: # this corresponds to grad when doing convolution raise NotImplementedError('partial sum') frows = theano.tensor.as_tensor_variable(frows) fcols = theano.tensor.as_tensor_variable(fcols) if frows.dtype[:3] not in ('int', 'uin'): raise TypeError(frows) if fcols.dtype[:3] not in ('int', 'uin'): raise TypeError(frows) if frows.ndim: raise TypeError('frows should be scalar', frows) if fcols.ndim: raise TypeError('fcols should be scalar', fcols) igroups, icolors, irows, icols, icount = images.type.broadcastable hgroups, hcolors, hrows, hcols, hcount = hidacts.type.broadcastable otype = theano.sandbox.cuda.CudaNdarrayType( broadcastable=(hrows, hcols, icolors, False, False, hgroups, hcolors)) return theano.Apply(self, [images, hidacts, frows, fcols], [otype()]) def c_support_code(self): """ .. 
todo:: WRITEME """ cufile = open(os.path.join(_this_dir, 'weight_acts.cu')) return cufile.read() def c_code_cache_version(self): """ .. todo:: WRITEME """ return () def c_code(self, node, nodename, inames, onames, sub): """ .. todo:: WRITEME """ images, hidacts, frows, fcols = inames dweights, = onames fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print(""" if (!CudaNdarray_is_c_contiguous(%(images)s)) { //XXX: Alex's code actually supports the rightmost images // dimension strided PyErr_Format(PyExc_NotImplementedError, "images not c contiguous"); %(fail)s; } if (!CudaNdarray_is_c_contiguous(%(hidacts)s)) { PyErr_Format(PyExc_NotImplementedError, "hidacts not c contiguous"); %(fail)s; } if (%(images)s->nd != 5) { PyErr_Format(PyExc_TypeError, "images ndim (%%i) must be 5", %(images)s->nd); %(fail)s; } if (%(hidacts)s->nd != 5) { PyErr_Format(PyExc_TypeError, "hidacts ndim (%%i) must be 5", %(images)s->nd); %(fail)s; } if (PyArray_NDIM(%(frows)s) != 0) { PyErr_Format(PyExc_TypeError, "frows ndim (%%i) must be 0", PyArray_NDIM(%(frows)s)); %(fail)s; } if (PyArray_NDIM(%(fcols)s) != 0) { PyErr_Format(PyExc_TypeError, "fcols ndim (%%i) must be 0", PyArray_NDIM(%(fcols)s)); %(fail)s; } { // new scope, new vars int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0]; int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1]; int irows = CudaNdarray_HOST_DIMS(%(images)s)[2]; int icols = CudaNdarray_HOST_DIMS(%(images)s)[3]; int icount = CudaNdarray_HOST_DIMS(%(images)s)[4]; int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0]; int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1]; int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2]; int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3]; int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4]; int fmodulesR = hrows; int fmodulesC = hcols; int fcolors = icolors_per_group; int frows = ((dtype_%(frows)s *) PyArray_DATA(%(frows)s))[0]; int fcols = ((dtype_%(fcols)s *) 
PyArray_DATA(%(fcols)s))[0]; int fgroups = hgroups; int filters_per_group = hcolors_per_group; // XXX: use this parameter properly int paddingStart = 0; int imgStride = icount; float scaleTargets = 0.0; float scaleOutput = 1.0; int moduleStride = %(moduleStride)s; int partialSum = 1; // set to 0 for convolution. if (igroups != hgroups) { PyErr_Format(PyExc_ValueError, "igroups != hgroups (%%i != %%i)", igroups, hgroups); %(fail)s; } if (icolors_per_group != fcolors) { PyErr_Format(PyExc_ValueError, "icolors_per_group != fcolors (%%i != %%i)", icolors_per_group, fcolors); %(fail)s; } if (icount != hcount) { PyErr_Format(PyExc_ValueError, "icount != hcount (%%i != %%i)", icount, hcount); %(fail)s; } // XXX: CHECK SHAPE IS CORRECT if (!%(dweights)s) { Py_XDECREF(%(dweights)s); int dims[7]; dims[0] = fmodulesR; dims[1] = fmodulesC; dims[2] = fcolors; dims[3] = frows; dims[4] = fcols; dims[5] = fgroups; dims[6] = filters_per_group; %(dweights)s = (CudaNdarray*)CudaNdarray_NewDims(7, dims); if (!%(dweights)s) { %(fail)s; } } assert(CudaNdarray_is_c_contiguous(%(dweights)s)); if (_weightActs( igroups, icolors_per_group, irows, icols, icount, fmodulesR, fmodulesC, frows, fcols, filters_per_group, CudaNdarray_DEV_DATA(%(images)s), CudaNdarray_DEV_DATA(%(hidacts)s), CudaNdarray_DEV_DATA(%(dweights)s), paddingStart, moduleStride, imgStride, scaleTargets, scaleOutput, partialSum)) { %(fail)s; } } // end bogus scope used for vars """, file=sio) return sio.getvalue() % locals() @register_opt() @local_optimizer([WeightActs]) def insert_gpu_weight_acts(node): """ .. todo:: WRITEME """ if isinstance(node.op, WeightActs): """ .. todo:: WRITEME """ images, hidacts, frows, fcols = node.inputs if any_from_gpu(images, hidacts) or any_gpu_client(*node.outputs): gpu_weight_acts = GpuWeightActs( module_stride=node.op.module_stride, partial_sum=1) return [host_from_gpu(gpu_weight_acts( gpu_from_host(images), gpu_contiguous(hidacts), frows, fcols, ))] class GpuImgActs(Base): """ .. 
todo:: WRITEME """ def make_node(self, filters, hidacts, irows, icols): """ .. todo:: WRITEME """ irows = theano.tensor.as_tensor_variable(irows) icols = theano.tensor.as_tensor_variable(icols) if irows.dtype[:3] not in ('int', 'uin'): raise TypeError(irows) if icols.dtype[:3] not in ('int', 'uin'): raise TypeError(irows) if irows.ndim: raise TypeError('irows should be scalar', irows) if icols.ndim: raise TypeError('icols should be scalar', icols) return theano.gof.Apply(self, [filters, hidacts, irows, icols], [hidacts.type()]) def c_support_code(self): """ .. todo:: WRITEME """ cufile = open(os.path.join(_this_dir, 'raw_img_acts.cu')) return cufile.read() def c_code_cache_version(self): """ .. todo:: WRITEME """ return () def c_code(self, node, nodename, inames, onames, sub): """ .. todo:: WRITEME """ filters, hidacts, irows, icols = inames dimages, = onames fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print(""" if (!CudaNdarray_is_c_contiguous(%(filters)s)) { //XXX: Alex's code actually supports the rightmost images // dimension strided PyErr_Format(PyExc_NotImplementedError, "images not c contiguous"); %(fail)s; } if (!CudaNdarray_is_c_contiguous(%(hidacts)s)) { PyErr_Format(PyExc_NotImplementedError, "hidacts not c contiguous"); %(fail)s; } if (%(filters)s->nd != 7) { PyErr_Format(PyExc_TypeError, "images ndim (%%i) must be 7", %(filters)s->nd); %(fail)s; } if (%(hidacts)s->nd != 5) { PyErr_Format(PyExc_TypeError, "hidacts ndim (%%i) must be 5", %(hidacts)s->nd); %(fail)s; } if (PyArray_NDIM(%(irows)s) != 0) { PyErr_Format(PyExc_TypeError, "frows ndim (%%i) must be 0", PyArray_NDIM(%(irows)s)); %(fail)s; } if (PyArray_NDIM(%(icols)s) != 0) { PyErr_Format(PyExc_TypeError, "fcols ndim (%%i) must be 0", PyArray_NDIM(%(icols)s)); %(fail)s; } { // new scope, new vars int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0]; int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1]; int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2]; 
int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3]; int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4]; int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5]; int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6]; int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0]; int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1]; int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2]; int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3]; int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4]; int igroups = fgroups; int icolors_per_group = fcolors; int irows = ((dtype_%(irows)s *) PyArray_DATA(%(irows)s))[0]; int icols = ((dtype_%(icols)s *) PyArray_DATA(%(icols)s))[0]; int icount = hcount; // TODO: use this parameter properly int paddingStart = 0; float scaleTargets = 0.0; float scaleOutput = 1.0; int moduleStride = %(moduleStride)s; bool conv = 0; if (hgroups != fgroups) { PyErr_Format(PyExc_ValueError, "hgroups != fgroups (%%i != %%i)", hgroups, fgroups); %(fail)s; } if (hcolors_per_group != filters_per_group) { PyErr_Format(PyExc_ValueError, "hcolors_per_group != filters_per_group (%%i != %%i)", hcolors_per_group, filters_per_group); %(fail)s; } // XXX: CHECK SHAPE IS CORRECT if (!%(dimages)s) { Py_XDECREF(%(dimages)s); int dims[5]; dims[0] = igroups; dims[1] = icolors_per_group; dims[2] = irows; dims[3] = icols; dims[4] = icount; %(dimages)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims); if (!%(dimages)s) { %(fail)s; } } assert(CudaNdarray_is_c_contiguous(%(dimages)s)); if (paddingStart + (fmodulesR - 1) * moduleStride + frows < irows) { PyErr_Format(PyExc_ValueError, "uhoh123: %%i %%i %%i %%i %%i", paddingStart, fmodulesR, moduleStride, frows, irows); %(fail)s; } if (_imgActs( fgroups, filters_per_group, fcolors, hcount, fmodulesR, fmodulesC, frows, fcols, irows, icols, CudaNdarray_DEV_DATA(%(filters)s), CudaNdarray_DEV_DATA(%(hidacts)s), CudaNdarray_DEV_DATA(%(dimages)s), paddingStart, moduleStride, scaleTargets, scaleOutput, conv)) { %(fail)s; } } // end bogus scope 
used for vars """, file=sio) return sio.getvalue() % locals() @register_opt() @local_optimizer([ImgActs]) def insert_gpu_img_acts(node): """ .. todo:: WRITEME """ if isinstance(node.op, ImgActs): filters, hidacts, irows, icols = node.inputs if any_from_gpu(filters, hidacts) or any_gpu_client(*node.outputs): gpu_img_acts = GpuImgActs( module_stride=node.op.module_stride, partial_sum=1) return [host_from_gpu(gpu_img_acts( gpu_from_host(filters), gpu_contiguous(hidacts), irows, icols, ))]
bsd-3-clause
steedos/odoo7
openerp/addons/base_action_rule/__openerp__.py
57
1998
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Automated Action Rules', 'version': '1.0', 'category': 'Sales Management', 'description': """ This module allows to implement action rules for any object. ============================================================ Use automated actions to automatically trigger actions for various screens. **Example:** A lead created by a specific user may be automatically set to a specific sales team, or an opportunity which still has status pending after 14 days might trigger an automatic reminder email. """, 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': ['base', 'mail'], 'data': [ 'base_action_rule_view.xml', 'security/ir.model.access.csv', 'base_action_rule_data.xml' ], 'demo': [], 'installable': True, 'auto_install': False, 'images': ['images/base_action_rule1.jpeg','images/base_action_rule2.jpeg','images/base_action_rule3.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mkieszek/odoo
addons/product_extended/wizard/wizard_price.py
39
2197
# -*- encoding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.exceptions import UserError from openerp.osv import fields, osv from openerp.tools.translate import _ class wizard_price(osv.osv): _name = "wizard.price" _description = "Compute price wizard" _columns = { 'info_field': fields.text('Info', readonly=True), 'real_time_accounting': fields.boolean("Generate accounting entries when real-time"), 'recursive': fields.boolean("Change prices of child BoMs too"), } def default_get(self, cr, uid, fields, context=None): res = super(wizard_price, self).default_get(cr, uid, fields, context=context) product_pool = self.pool.get('product.template') product_obj = product_pool.browse(cr, uid, context.get('active_id', False)) if context is None: context = {} rec_id = context and context.get('active_id', False) assert rec_id, _('Active ID is not set in Context.') computed_price = product_pool.compute_price(cr, uid, [], template_ids=[product_obj.id], test=True, context=context) if product_obj.id in computed_price: res['info_field'] = "%s: %s" % (product_obj.name, computed_price[product_obj.id]) else: res['info_field'] = "" return res def compute_from_bom(self, cr, uid, ids, context=None): assert len(ids) == 1 if context is None: context = {} model = context.get('active_model') if model != 'product.template': raise UserError(_('This wizard is build for product templates, while you are currently running it from a product variant.')) rec_id = context and context.get('active_id', False) assert rec_id, _('Active ID is not set in Context.') prod_obj = self.pool.get('product.template') res = self.browse(cr, uid, ids, context=context) prod = prod_obj.browse(cr, uid, rec_id, context=context) prod_obj.compute_price(cr, uid, [], template_ids=[prod.id], real_time_accounting=res[0].real_time_accounting, recursive=res[0].recursive, test=False, context=context)
agpl-3.0
tpodowd/boto
boto/sqs/regioninfo.py
167
1524
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.regioninfo import RegionInfo class SQSRegionInfo(RegionInfo): def __init__(self, connection=None, name=None, endpoint=None, connection_cls=None): from boto.sqs.connection import SQSConnection super(SQSRegionInfo, self).__init__(connection, name, endpoint, SQSConnection)
mit
tpaszkowski/quantum
quantum/extensions/portsecurity.py
4
2607
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 Nicira Networks, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Aaron Rosen, Nicira, Inc from quantum.api.v2 import attributes from quantum.common import exceptions as qexception class PortSecurityPortHasSecurityGroup(qexception.InUse): message = _("Port has security group associated. Cannot disable port " "security or ip address until security group is removed") class PortSecurityAndIPRequiredForSecurityGroups(qexception.InvalidInput): message = _("Port security must be enabled and port must have an IP" " address in order to use security groups.") class PortSecurityBindingNotFound(qexception.InvalidExtensionEnv): message = _("Port does not have port security binding.") PORTSECURITY = 'port_security_enabled' EXTENDED_ATTRIBUTES_2_0 = { 'networks': { PORTSECURITY: {'allow_post': True, 'allow_put': True, 'convert_to': attributes.convert_to_boolean, 'default': True, 'is_visible': True}, }, 'ports': { PORTSECURITY: {'allow_post': True, 'allow_put': True, 'convert_to': attributes.convert_to_boolean, 'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Portsecurity(object): """Extension class supporting port security """ @classmethod def get_name(cls): return "Port Security" @classmethod def get_alias(cls): return "port-security" @classmethod def get_description(cls): return "Provides port security" @classmethod def get_namespace(cls): return 
"http://docs.openstack.org/ext/portsecurity/api/v1.0" @classmethod def get_updated(cls): return "2012-07-23T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {}
apache-2.0
funkring/fdoo
addons/account/project/project.py
273
2423
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class account_analytic_journal(osv.osv): _name = 'account.analytic.journal' _description = 'Analytic Journal' _columns = { 'name': fields.char('Journal Name', required=True), 'code': fields.char('Journal Code', size=8), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."), 'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. 
When it needs for a document (eg: an invoice) to create analytic entries, Odoo will look for a matching journal of the same type."), 'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False), 'company_id': fields.many2one('res.company', 'Company', required=True), } _defaults = { 'active': True, 'type': 'general', 'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id, } class account_journal(osv.osv): _inherit="account.journal" _columns = { 'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
nocarryr/AV-Asset-Manager
avam/assettags/tag_handler.py
1
8030
from io import BytesIO
import random
import xml.etree.ElementTree as ET
from base64 import b64encode

import qrcode
from qrcode.image.svg import SvgPathFillImage
from wand.image import Image as WandImage

from assettags import settings


def _generate_code(num_chars=None):
    """Build a random asset-tag code from the configured alphabet.

    Falls back to settings.ASSET_TAG_LENGTH when no length is given.
    NOTE(review): uses `random`, not `secrets` — codes are identifiers,
    presumably not security tokens; confirm before relying on unguessability.
    """
    if num_chars is None:
        num_chars = settings.ASSET_TAG_LENGTH
    choices = settings.ASSET_TAG_CHARS
    return ''.join(random.choice(choices) for i in range(num_chars))

def generate_code(num_chars=None):
    """Generate a tag code, delegating to a project-configured hook if set."""
    f = settings.ASSET_TAG_GENERATE_FUNCTION
    if f is not None:
        return f(num_chars)
    return _generate_code(num_chars)

class SvgScaledImage(SvgPathFillImage):
    """qrcode SVG factory that can emit the QR path inside positioning groups.

    The extra `_get_path_centered` helpers wrap the raw QR path in nested
    <g> elements so the caller can place it centered, left- or right-shifted
    within a fixed 63px box.
    """
    def __init__(self, *args, **kwargs):
        super(SvgScaledImage, self).__init__(*args, **kwargs)
    def to_string(self):
        # Append the QR path to the root element and serialize the whole tree.
        self._img.append(self.make_path())
        return ET.tostring(self._img)
    def _get_path_centered(self, x_pos=0, y_pos=0):
        # 14.5 is presumably half the QR drawing size (29 units) — the
        # translate recenters the path; shifting by a full 29 moves it one
        # box width/height.  TODO(review): confirm against the QR box size.
        xy = [-14.5, -14.5]
        for i, pos in enumerate([x_pos, y_pos]):
            if pos < 0:
                xy[i] -= 14.5 * 2
            elif pos > 0:
                xy[i] += 14.5 * 2
        main_g = ET.Element('g', id='qr-box-outer', width='63px', height='63px')
        inner_g = ET.Element(
            'g',
            id='qr-box-inner',
            transform='translate({}, {})'.format(*xy),
        )
        inner_g.append(self.make_path())
        main_g.append(inner_g)
        return main_g
    def get_path_centered(self):
        return self._get_path_centered(0, 0)
    def get_path_left(self):
        return self._get_path_centered(-1, 0)
    def get_path_right(self):
        return self._get_path_centered(1, 0)
    # def _svg(self, viewBox=None, **kwargs):
    #     elem = super(SvgScaledImage, self)._svg(viewBox, **kwargs)
    #     elem.set('width', self.scale)
    #     elem.set('height', self.scale)
    #     return elem

def build_qr_svg(code_str, **kwargs):
    """Render `code_str` as a QR using the scaled-SVG factory by default."""
    kwargs.setdefault('image_factory', SvgScaledImage)
    return qrcode.make(code_str, **kwargs)

def build_qr_png(code_str):
    """Render `code_str` as a QR with qrcode's default (PIL) factory."""
    return qrcode.make(code_str)

class AssetTagImage(object):
    """Compose a printable asset-tag image (QR + header + code text) as SVG/PNG.

    kwargs:
        asset_tag:    object with a `.code` attribute (project model)
        template:     layout object providing width/height and text locations
        scale:        optional (w, h) override for the rendered size
        root_tag:     root element name, default 'svg'
        image_format: default 'svg'
    """
    def __init__(self, **kwargs):
        self.asset_tag = kwargs.get('asset_tag')
        self.template = kwargs.get('template')
        self.scale = kwargs.get('scale')
        self.root_tag = kwargs.get('root_tag', 'svg')
        self.image_format = kwargs.get('image_format', 'svg')
        self.qr_img = build_qr_svg(self.asset_tag.code)
        # Let the QR fill whatever box it is placed in.
        self.qr_img._img.set('width', '100%')
        self.qr_img._img.set('height', '100%')
        self.svg = self.build_svg()
    @property
    def qr_svg_bytes(self):
        # Lazily serialized and cached SVG bytes for the full tag.
        b = getattr(self, '_qr_svg_bytes', None)
        if b is None:
            b = self._qr_svg_bytes = self.get_qr_svg_bytes()
        return b
    @property
    def qr_svg_data_url(self):
        # NOTE(review): under Python 3, b64encode returns bytes, so the
        # formatted URL would contain a b'...' repr — looks Python-2 only.
        b = b64encode(self.qr_svg_bytes)
        return 'data:image/svg+xml;charset=utf-8;base64,{}'.format(b)
    def get_qr_svg_bytes(self):
        return ET.tostring(self.svg)
    @property
    def png(self):
        # Lazily built PIL image of the bare QR (no template decoration).
        png = getattr(self, '_png', None)
        if png is None:
            png = self._png = build_qr_png(self.asset_tag.code)
        return png._img
    def get_png_string(self, full_tag=False):
        """Return PNG bytes; full_tag=True rasterizes the whole SVG via Wand."""
        fh = BytesIO()
        if full_tag:
            with WandImage(blob=self.qr_svg_bytes, format='svg') as img:
                img.format = 'png'
                img.save(file=fh)
        else:
            self.png.save(fh, 'PNG')
        s = fh.getvalue()
        fh.close()
        return s
    def get_png_b64_string(self, full_tag=False):
        s = self.get_png_string(full_tag)
        return b64encode(s)
    def get_full_png_b64_string(self):
        return self.get_png_b64_string(full_tag=True)
    def build_svg(self):
        """Build the root <svg> element sized from the template (or `scale`)."""
        vw = self.template.width
        vh = self.template.height
        if self.scale is not None:
            w, h = [str(s) for s in self.scale]
        else:
            w, h = '{}px'.format(vw), '{}px'.format(vh)
        root = ET.Element(
            self.root_tag,
            width=w,
            height=h,
            viewBox='0 0 %s %s' % (vw, vh),
            version='1.1',
        )
        root.set('xmlns', SvgPathFillImage._SVG_namespace)
        root.extend(self.build_svg_content())
        return root
    def build_svg_content(self):
        """Assemble background, optional header, optional code text and QR."""
        elems = []
        w = self.template.width
        h = self.template.height
        elems.append(ET.Element(
            'rect',
            width=str(w),
            height=str(h),
            id='bg-rect',
            style='fill:white;stroke:black;',
        ))
        if self.template.header_text:
            elems.append(self.build_header())
        code_text = self.build_code_text()
        if code_text is not None:
            elems.append(code_text)
        elems.append(self.build_qr_group())
        return elems
    def format_text_elem(self, loc, **kwargs):
        """Compute x/y/style attrs for a text element at location `loc`.

        Returns None for unrecognized locations (callers treat that as
        "no text").  `loc` is one of 'Above', 'Below', 'Left', 'Right'.
        """
        x = self.template.width / 2.
        h = self.template.height
        kwargs.setdefault('font_size', '13px')
        kwargs.setdefault('text_align', 'center')
        kwargs.setdefault('text_anchor', 'middle')
        kwargs.setdefault('baseline', 'no-change')
        if loc == 'Above':
            y = h * .1
            kwargs['baseline'] = 'hanging'
        elif loc == 'Below':
            y = h - (h * .05)
        elif loc == 'Left':
            x = 0.
            y = h / 2.
            kwargs['text_align'] = 'left'
            kwargs['text_anchor'] = 'start'
        elif loc == 'Right':
            x = self.template.width
            y = h / 2.
            kwargs['text_align'] = 'right'
            kwargs['text_anchor'] = 'end'
        else:
            return None
        style = '''
            font-size:{font_size};font-style:normal;font-weight:normal;
            text-align:{text_align};line-height:125%;letter-spacing:0px;
            word-spacing:0px;text-anchor:{text_anchor};fill:#000000;
            fill-opacity:1;stroke:none;font-family:sans-serif;
            dominant-baseline:{baseline}'''.format(**kwargs)
        return dict(x=str(x), y=str(y), style=style)
    def build_header(self):
        """Build the header-text <g>; assumes header location is valid."""
        loc = self.template.get_header_text_location_display()
        ekwargs = self.format_text_elem(loc, baseline='text-before-edge', font_size='13px')
        x, y = [ekwargs[k] for k in ['x', 'y']]
        g = ET.Element('g', id='header-group', transform='translate(%s, %s)' % (x, y))
        ekwargs['id'] = 'header-text'
        t = ET.Element('text', **ekwargs)
        t.set('xml:space', 'preserve')
        tspan = ET.Element('tspan', x='0', y='0', id='header-tspan')
        tspan.text = self.template.header_text
        t.append(tspan)
        g.append(t)
        return g
    def build_code_text(self):
        """Build the tag-code <g>, or None when the template hides it."""
        loc = self.template.get_code_text_location_display()
        ekwargs = self.format_text_elem(loc, font_size='9px')
        if ekwargs is None:
            return None
        x, y = [ekwargs[k] for k in ['x', 'y']]
        g = ET.Element('g', id='code-text-group', transform='translate(%s, %s)' % (x, y))
        ekwargs['id'] = 'code-text'
        t = ET.Element('text', **ekwargs)
        t.set('xml:space', 'preserve')
        tspan = ET.Element('tspan', x='0', y='0', id='code-text-tspan')
        tspan.text = str(self.asset_tag.code)
        t.append(tspan)
        g.append(t)
        return g
    def build_qr_group(self):
        """Place the QR path, nudged away from header/code text locations."""
        w = self.template.width
        h = self.template.height
        h_loc = self.template.get_header_text_location_display()
        text_loc = self.template.get_code_text_location_display()
        x = w / 2.
        y = h / 2.
        qr_x = 0
        qr_y = 0
        # Shift the QR horizontally away from side-mounted text, unless the
        # other text element occupies the opposite side.
        if h_loc == 'Left' and text_loc != 'Right':
            qr_x = 1
        elif h_loc == 'Right' and text_loc != 'Left':
            qr_x = -1
        if text_loc == 'Above':
            y += h * .1
        scale = 3
        g = self.qr_img._get_path_centered(qr_x, qr_y)
        g.set('transform', 'matrix(%s, 0, 0, %s, %s, %s)' % (scale, scale, x, y))
        return g
gpl-3.0
m11s/MissionPlanner
ExtLibs/Mavlink/mavgen_c.py
33
18071
#!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a C implementation

Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''

import sys, textwrap, os, time
import mavparse, mavtemplate

# Shared template engine; ${name} substitutes attributes of the object
# passed to t.write(), ${{list:...}} expands once per list element.
t = mavtemplate.MAVTemplate()

def generate_version_h(directory, xml):
    '''generate version.h'''
    f = open(os.path.join(directory, "version.h"), mode='w')
    t.write(f,'''
/** @file
 * @brief MAVLink comm protocol built from ${basename}.xml
 * @see http://pixhawk.ethz.ch/software/mavlink
 */
#ifndef MAVLINK_VERSION_H
#define MAVLINK_VERSION_H

#define MAVLINK_BUILD_DATE "${parse_time}"
#define MAVLINK_WIRE_PROTOCOL_VERSION "${wire_protocol_version}"
#define MAVLINK_MAX_DIALECT_PAYLOAD_SIZE ${largest_payload}

#endif // MAVLINK_VERSION_H
''', xml)
    f.close()

def generate_mavlink_h(directory, xml):
    '''generate mavlink.h'''
    f = open(os.path.join(directory, "mavlink.h"), mode='w')
    t.write(f,'''
/** @file
 * @brief MAVLink comm protocol built from ${basename}.xml
 * @see http://pixhawk.ethz.ch/software/mavlink
 */
#ifndef MAVLINK_H
#define MAVLINK_H

#ifndef MAVLINK_STX
#define MAVLINK_STX ${protocol_marker}
#endif

#ifndef MAVLINK_ENDIAN
#define MAVLINK_ENDIAN ${mavlink_endian}
#endif

#ifndef MAVLINK_ALIGNED_FIELDS
#define MAVLINK_ALIGNED_FIELDS ${aligned_fields_define}
#endif

#ifndef MAVLINK_CRC_EXTRA
#define MAVLINK_CRC_EXTRA ${crc_extra_define}
#endif

#include "version.h"
#include "${basename}.h"

#endif // MAVLINK_H
''', xml)
    f.close()

def generate_main_h(directory, xml):
    '''generate main header per XML file'''
    f = open(os.path.join(directory, xml.basename + ".h"), mode='w')
    t.write(f, '''
/** @file
 * @brief MAVLink comm protocol generated from ${basename}.xml
 * @see http://qgroundcontrol.org/mavlink/
 */
#ifndef ${basename_upper}_H
#define ${basename_upper}_H

#ifdef __cplusplus
extern "C" {
#endif

// MESSAGE LENGTHS AND CRCS

#ifndef MAVLINK_MESSAGE_LENGTHS
#define MAVLINK_MESSAGE_LENGTHS {${message_lengths_array}}
#endif

#ifndef MAVLINK_MESSAGE_CRCS
#define MAVLINK_MESSAGE_CRCS {${message_crcs_array}}
#endif

#ifndef MAVLINK_MESSAGE_INFO
#define MAVLINK_MESSAGE_INFO {${message_info_array}}
#endif

#include "../protocol.h"

#define MAVLINK_ENABLED_${basename_upper}

${{include_list:#include "../${base}/${base}.h"
}}

// MAVLINK VERSION

#ifndef MAVLINK_VERSION
#define MAVLINK_VERSION ${version}
#endif

#if (MAVLINK_VERSION == 0)
#undef MAVLINK_VERSION
#define MAVLINK_VERSION ${version}
#endif

// ENUM DEFINITIONS

${{enum:
/** @brief ${description} */
#ifndef HAVE_ENUM_${name}
#define HAVE_ENUM_${name}
enum ${name}
{
${{entry:	${name}=${value}, /* ${description} |${{param:${description}| }} */
}}
};
#endif
}}

// MESSAGE DEFINITIONS
${{message:#include "./mavlink_msg_${name_lower}.h"
}}

#ifdef __cplusplus
}
#endif // __cplusplus
#endif // ${basename_upper}_H
''', xml)
    f.close()


def generate_message_h(directory, m):
    '''generate per-message header for a XML file'''
    f = open(os.path.join(directory, 'mavlink_msg_%s.h' % m.name_lower), mode='w')
    t.write(f, '''
// MESSAGE ${name} PACKING

#define MAVLINK_MSG_ID_${name} ${id}

typedef struct __mavlink_${name_lower}_t
{
${{ordered_fields: ${type} ${name}${array_suffix}; ///< ${description}
}}
} mavlink_${name_lower}_t;

#define MAVLINK_MSG_ID_${name}_LEN ${wire_length}
#define MAVLINK_MSG_ID_${id}_LEN ${wire_length}

${{array_fields:#define MAVLINK_MSG_${msg_name}_FIELD_${name_upper}_LEN ${array_length}
}}

#define MAVLINK_MESSAGE_INFO_${name} { \\
	"${name}", \\
	${num_fields}, \\
	{ ${{ordered_fields: { "${name}", ${c_print_format}, MAVLINK_TYPE_${type_upper}, ${array_length}, ${wire_offset}, offsetof(mavlink_${name_lower}_t, ${name}) }, \\
        }} } \\
}


/**
 * @brief Pack a ${name_lower} message
 * @param system_id ID of this system
 * @param component_id ID of this component (e.g. 200 for IMU)
 * @param msg The MAVLink message to compress the data into
 *
${{arg_fields: * @param ${name} ${description}
}}
 * @return length of the message in bytes (excluding serial stream start sign)
 */
static inline uint16_t mavlink_msg_${name_lower}_pack(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg,
						      ${{arg_fields: ${array_const}${type} ${array_prefix}${name},}})
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
	char buf[${wire_length}];
${{scalar_fields:	_mav_put_${type}(buf, ${wire_offset}, ${putname});
}}
${{array_fields:	_mav_put_${type}_array(buf, ${wire_offset}, ${name}, ${array_length});
}}
        memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, ${wire_length});
#else
	mavlink_${name_lower}_t packet;
${{scalar_fields:	packet.${name} = ${putname};
}}
${{array_fields:	mav_array_memcpy(packet.${name}, ${name}, sizeof(${type})*${array_length});
}}
        memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, ${wire_length});
#endif

	msg->msgid = MAVLINK_MSG_ID_${name};
	return mavlink_finalize_message(msg, system_id, component_id, ${wire_length}${crc_extra_arg});
}

/**
 * @brief Pack a ${name_lower} message on a channel
 * @param system_id ID of this system
 * @param component_id ID of this component (e.g. 200 for IMU)
 * @param chan The MAVLink channel this message was sent over
 * @param msg The MAVLink message to compress the data into
${{arg_fields: * @param ${name} ${description}
}}
 * @return length of the message in bytes (excluding serial stream start sign)
 */
static inline uint16_t mavlink_msg_${name_lower}_pack_chan(uint8_t system_id, uint8_t component_id, uint8_t chan,
							   mavlink_message_t* msg,
						           ${{arg_fields:${array_const}${type} ${array_prefix}${name},}})
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
	char buf[${wire_length}];
${{scalar_fields:	_mav_put_${type}(buf, ${wire_offset}, ${putname});
}}
${{array_fields:	_mav_put_${type}_array(buf, ${wire_offset}, ${name}, ${array_length});
}}
        memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, ${wire_length});
#else
	mavlink_${name_lower}_t packet;
${{scalar_fields:	packet.${name} = ${putname};
}}
${{array_fields:	mav_array_memcpy(packet.${name}, ${name}, sizeof(${type})*${array_length});
}}
        memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, ${wire_length});
#endif

	msg->msgid = MAVLINK_MSG_ID_${name};
	return mavlink_finalize_message_chan(msg, system_id, component_id, chan, ${wire_length}${crc_extra_arg});
}

/**
 * @brief Encode a ${name_lower} struct into a message
 *
 * @param system_id ID of this system
 * @param component_id ID of this component (e.g. 200 for IMU)
 * @param msg The MAVLink message to compress the data into
 * @param ${name_lower} C-struct to read the message contents from
 */
static inline uint16_t mavlink_msg_${name_lower}_encode(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg, const mavlink_${name_lower}_t* ${name_lower})
{
	return mavlink_msg_${name_lower}_pack(system_id, component_id, msg,${{arg_fields: ${name_lower}->${name},}});
}

/**
 * @brief Send a ${name_lower} message
 * @param chan MAVLink channel to send the message
 *
${{arg_fields: * @param ${name} ${description}
}}
 */
#ifdef MAVLINK_USE_CONVENIENCE_FUNCTIONS

static inline void mavlink_msg_${name_lower}_send(mavlink_channel_t chan,${{arg_fields: ${array_const}${type} ${array_prefix}${name},}})
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
	char buf[${wire_length}];
${{scalar_fields:	_mav_put_${type}(buf, ${wire_offset}, ${putname});
}}
${{array_fields:	_mav_put_${type}_array(buf, ${wire_offset}, ${name}, ${array_length});
}}
	_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_${name}, buf, ${wire_length}${crc_extra_arg});
#else
	mavlink_${name_lower}_t packet;
${{scalar_fields:	packet.${name} = ${putname};
}}
${{array_fields:	mav_array_memcpy(packet.${name}, ${name}, sizeof(${type})*${array_length});
}}
	_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_${name}, (const char *)&packet, ${wire_length}${crc_extra_arg});
#endif
}

#endif

// MESSAGE ${name} UNPACKING

${{fields:
/**
 * @brief Get field ${name} from ${name_lower} message
 *
 * @return ${description}
 */
static inline ${return_type} mavlink_msg_${name_lower}_get_${name}(const mavlink_message_t* msg${get_arg})
{
	return _MAV_RETURN_${type}${array_tag}(msg, ${array_return_arg} ${wire_offset});
}
}}

/**
 * @brief Decode a ${name_lower} message into a struct
 *
 * @param msg The message to decode
 * @param ${name_lower} C-struct to decode the message contents into
 */
static inline void mavlink_msg_${name_lower}_decode(const mavlink_message_t* msg, mavlink_${name_lower}_t* ${name_lower})
{
#if MAVLINK_NEED_BYTE_SWAP
${{ordered_fields:	${decode_left}mavlink_msg_${name_lower}_get_${name}(msg${decode_right});
}}
#else
	memcpy(${name_lower}, _MAV_PAYLOAD(msg), ${wire_length});
#endif
}
''', m)
    f.close()


def generate_testsuite_h(directory, xml):
    '''generate testsuite.h per XML file'''
    f = open(os.path.join(directory, "testsuite.h"), mode='w')
    t.write(f, '''
/** @file
 * @brief MAVLink comm protocol testsuite generated from ${basename}.xml
 * @see http://qgroundcontrol.org/mavlink/
 */
#ifndef ${basename_upper}_TESTSUITE_H
#define ${basename_upper}_TESTSUITE_H

#ifdef __cplusplus
extern "C" {
#endif

#ifndef MAVLINK_TEST_ALL
#define MAVLINK_TEST_ALL
${{include_list:static void mavlink_test_${base}(uint8_t, uint8_t, mavlink_message_t *last_msg);
}}
static void mavlink_test_${basename}(uint8_t, uint8_t, mavlink_message_t *last_msg);

static void mavlink_test_all(uint8_t system_id, uint8_t component_id, mavlink_message_t *last_msg)
{
${{include_list:	mavlink_test_${base}(system_id, component_id, last_msg);
}}
	mavlink_test_${basename}(system_id, component_id, last_msg);
}
#endif

${{include_list:#include "../${base}/testsuite.h"
}}

${{message:
static void mavlink_test_${name_lower}(uint8_t system_id, uint8_t component_id, mavlink_message_t *last_msg)
{
	mavlink_message_t msg;
        uint8_t buffer[MAVLINK_MAX_PACKET_LEN];
        uint16_t i;
	mavlink_${name_lower}_t packet_in = {
		${{ordered_fields:${c_test_value},
	}}};
	mavlink_${name_lower}_t packet1, packet2;
        memset(&packet1, 0, sizeof(packet1));
        ${{scalar_fields:packet1.${name} = packet_in.${name};
        }}
        ${{array_fields:mav_array_memcpy(packet1.${name}, packet_in.${name}, sizeof(${type})*${array_length});
        }}

        memset(&packet2, 0, sizeof(packet2));
	mavlink_msg_${name_lower}_encode(system_id, component_id, &msg, &packet1);
	mavlink_msg_${name_lower}_decode(&msg, &packet2);
        MAVLINK_ASSERT(memcmp(&packet1, &packet2, sizeof(packet1)) == 0);

        memset(&packet2, 0, sizeof(packet2));
	mavlink_msg_${name_lower}_pack(system_id, component_id, &msg ${{arg_fields:, packet1.${name} }});
	mavlink_msg_${name_lower}_decode(&msg, &packet2);
        MAVLINK_ASSERT(memcmp(&packet1, &packet2, sizeof(packet1)) == 0);

        memset(&packet2, 0, sizeof(packet2));
	mavlink_msg_${name_lower}_pack_chan(system_id, component_id, MAVLINK_COMM_0, &msg ${{arg_fields:, packet1.${name} }});
	mavlink_msg_${name_lower}_decode(&msg, &packet2);
        MAVLINK_ASSERT(memcmp(&packet1, &packet2, sizeof(packet1)) == 0);

        memset(&packet2, 0, sizeof(packet2));
        mavlink_msg_to_send_buffer(buffer, &msg);
        for (i=0; i<mavlink_msg_get_send_buffer_length(&msg); i++) {
        	comm_send_ch(MAVLINK_COMM_0, buffer[i]);
        }
	mavlink_msg_${name_lower}_decode(last_msg, &packet2);
        MAVLINK_ASSERT(memcmp(&packet1, &packet2, sizeof(packet1)) == 0);

        memset(&packet2, 0, sizeof(packet2));
	mavlink_msg_${name_lower}_send(MAVLINK_COMM_1 ${{arg_fields:, packet1.${name} }});
	mavlink_msg_${name_lower}_decode(last_msg, &packet2);
        MAVLINK_ASSERT(memcmp(&packet1, &packet2, sizeof(packet1)) == 0);
}
}}

static void mavlink_test_${basename}(uint8_t system_id, uint8_t component_id, mavlink_message_t *last_msg)
{
${{message:	mavlink_test_${name_lower}(system_id, component_id, last_msg);
}}
}

#ifdef __cplusplus
}
#endif // __cplusplus
#endif // ${basename_upper}_TESTSUITE_H
''', xml)
    f.close()

def copy_fixed_headers(directory, xml):
    '''copy the fixed protocol headers to the target directory'''
    import shutil
    hlist = [ 'protocol.h', 'mavlink_helpers.h', 'mavlink_types.h', 'checksum.h' ]
    basepath = os.path.dirname(os.path.realpath(__file__))
    srcpath = os.path.join(basepath, 'C/include_v%s' % xml.wire_protocol_version)
    print("Copying fixed headers")
    for h in hlist:
        src = os.path.realpath(os.path.join(srcpath, h))
        dest = os.path.realpath(os.path.join(directory, h))
        # Skip self-copy when generating in place.
        if src == dest:
            continue
        shutil.copy(src, dest)

class mav_include(object):
    # Tiny record type so the template engine can expand ${base} per include.
    def __init__(self, base):
        self.base = base

def generate_one(basename, xml):
    '''generate headers for one XML file'''

    directory = os.path.join(basename, xml.basename)

    print("Generating C implementation in directory %s" % directory)
    mavparse.mkdir_p(directory)

    if xml.little_endian:
        xml.mavlink_endian = "MAVLINK_LITTLE_ENDIAN"
    else:
        xml.mavlink_endian = "MAVLINK_BIG_ENDIAN"

    if xml.crc_extra:
        xml.crc_extra_define = "1"
    else:
        xml.crc_extra_define = "0"

    if xml.sort_fields:
        xml.aligned_fields_define = "1"
    else:
        xml.aligned_fields_define = "0"

    # work out the included headers
    xml.include_list = []
    for i in xml.include:
        base = i[:-4]
        xml.include_list.append(mav_include(base))

    # form message lengths array
    xml.message_lengths_array = ''
    for mlen in xml.message_lengths:
        xml.message_lengths_array += '%u, ' % mlen
    xml.message_lengths_array = xml.message_lengths_array[:-2]

    # and message CRCs array
    xml.message_crcs_array = ''
    for crc in xml.message_crcs:
        xml.message_crcs_array += '%u, ' % crc
    xml.message_crcs_array = xml.message_crcs_array[:-2]

    # form message info array
    xml.message_info_array = ''
    for name in xml.message_names:
        if name is not None:
            xml.message_info_array += 'MAVLINK_MESSAGE_INFO_%s, ' % name
        else:
            # Several C compilers don't accept {NULL} for
            # multi-dimensional arrays and structs
            # feed the compiler a "filled" empty message
            xml.message_info_array += '{"EMPTY",0,{{"","",MAVLINK_TYPE_CHAR,0,0,0}}}, '
    xml.message_info_array = xml.message_info_array[:-2]

    # add some extra field attributes for convenience with arrays
    for m in xml.message:
        m.msg_name = m.name
        if xml.crc_extra:
            m.crc_extra_arg = ", %s" % m.crc_extra
        else:
            m.crc_extra_arg = ""
        for f in m.fields:
            if f.print_format is None:
                f.c_print_format = 'NULL'
            else:
                f.c_print_format = '"%s"' % f.print_format
            if f.array_length != 0:
                # Array field: pointer args, _array accessors, and a
                # uint16_t byte-count return from the getter.
                f.array_suffix = '[%u]' % f.array_length
                f.array_prefix = '*'
                f.array_tag = '_array'
                f.array_arg = ', %u' % f.array_length
                f.array_return_arg = '%s, %u, ' % (f.name, f.array_length)
                f.array_const = 'const '
                f.decode_left = ''
                f.decode_right = ', %s->%s' % (m.name_lower, f.name)
                f.return_type = 'uint16_t'
                f.get_arg = ', %s *%s' % (f.type, f.name)
                if f.type == 'char':
                    f.c_test_value = '"%s"' % f.test_value
                else:
                    test_strings = []
                    for v in f.test_value:
                        test_strings.append(str(v))
                    f.c_test_value = '{ %s }' % ', '.join(test_strings)
            else:
                # Scalar field: by-value arg, typed getter return.
                f.array_suffix = ''
                f.array_prefix = ''
                f.array_tag = ''
                f.array_arg = ''
                f.array_return_arg = ''
                f.array_const = ''
                f.decode_left = "%s->%s = " % (m.name_lower, f.name)
                f.decode_right = ''
                f.get_arg = ''
                f.return_type = f.type
                if f.type == 'char':
                    f.c_test_value = "'%s'" % f.test_value
                elif f.type == 'uint64_t':
                    f.c_test_value = "%sULL" % f.test_value
                elif f.type == 'int64_t':
                    f.c_test_value = "%sLL" % f.test_value
                else:
                    f.c_test_value = f.test_value

    # cope with uint8_t_mavlink_version
    for m in xml.message:
        m.arg_fields = []
        m.array_fields = []
        m.scalar_fields = []
        for f in m.ordered_fields:
            if f.array_length != 0:
                m.array_fields.append(f)
            else:
                m.scalar_fields.append(f)
        for f in m.fields:
            if not f.omit_arg:
                m.arg_fields.append(f)
                f.putname = f.name
            else:
                # Omitted args (e.g. fixed version fields) are packed
                # from their constant value instead of a parameter.
                f.putname = f.const_value

    generate_mavlink_h(directory, xml)
    generate_version_h(directory, xml)
    generate_main_h(directory, xml)
    for m in xml.message:
        generate_message_h(directory, m)
    generate_testsuite_h(directory, xml)

def generate(basename, xml_list):
    '''generate complete MAVLink C implementation'''
    for xml in xml_list:
        generate_one(basename, xml)
    copy_fixed_headers(basename, xml_list[0])
gpl-3.0
anandbhoraskar/Diamond
src/collectors/portstat/tests/test_portstat.py
21
2057
from test import CollectorTestCase
from test import get_collector_config
from mock import call, Mock, patch
from unittest import TestCase
from diamond.collector import Collector
from portstat import get_port_stats, PortStatCollector


class PortStatCollectorTestCase(CollectorTestCase):
    """Exercise PortStatCollector end-to-end against a two-port config."""

    TEST_CONFIG = {
        'port': {
            'something1': {
                'number': 5222,
            },
            'something2': {
                'number': 8888,
            },
        }
    }

    def setUp(self):
        # Build the collector from the canned config; no handlers attached.
        cfg = get_collector_config('PortStatCollector', self.TEST_CONFIG)
        self.collector = PortStatCollector(cfg, None)

    def test_import(self):
        self.assertTrue(PortStatCollector)

    @patch('portstat.get_port_stats')
    @patch.object(Collector, 'publish')
    def test_collect(self, publish_mock, get_port_stats_mock):
        # Every configured port reports the same fake counter.
        get_port_stats_mock.return_value = {'foo': 1}

        self.collector.collect()

        # Both configured port numbers must have been queried, order-free.
        get_port_stats_mock.assert_has_calls(
            [call(5222), call(8888)], any_order=True)
        for prefix in ('something1', 'something2'):
            self.assertPublished(publish_mock, '%s.foo' % prefix, 1)


class GetPortStatsTestCase(TestCase):
    """Unit-test get_port_stats against a mocked psutil connection table."""

    @patch('portstat.psutil.net_connections')
    def test_get_port_stats(self, net_connections_mock):
        # Five fake connections: one on 5222, three on 8888, one on 9999.
        conns = [Mock() for _ in range(5)]
        conns[0].laddr = (None, 5222)
        conns[0].status = 'ok'
        for conn in conns[1:4]:
            conn.laddr = (None, 8888)
        conns[1].status = 'ok'
        conns[2].status = 'OK'
        conns[3].status = 'bad'
        conns[4].laddr = (None, 9999)
        net_connections_mock.return_value = conns

        # Single match: status counted once; statuses compared case-insensitively.
        self.assertEqual(get_port_stats(5222), {'ok': 1})
        self.assertEqual(net_connections_mock.call_count, 1)

        self.assertEqual(get_port_stats(8888), {'ok': 2, 'bad': 1})
        self.assertEqual(net_connections_mock.call_count, 2)
mit
Johnzero/erp
openerp/addons/auth_openid/controllers/main.py
3
9131
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# NOTE(review): Python 2 module (uses `except X, e` syntax below).

import logging
import os
import sys
import urllib

import werkzeug.urls
import werkzeug.exceptions

from openerp.modules.registry import RegistryManager

try:
    import openerp.addons.web.common.http as openerpweb
except ImportError:
    import web.common.http as openerpweb

from openid import oidutil
from openid.store import memstore
#from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg

from .. import utils

_logger = logging.getLogger('web.auth_openid')
# Route python-openid's own logging through the stdlib logger.
oidutil.log = logging.getLogger('openid').debug


class GoogleAppsAwareConsumer(consumer.GenericConsumer):
    """OpenID consumer that rewrites Google Apps responses.

    Google Apps endpoints return identifiers under https://www.google.com/a/...
    which standard verification rejects; rewrite them to the canonical
    user-xrds form and re-sign the message so verification succeeds.
    """
    def complete(self, message, endpoint, return_to):
        if message.getOpenIDNamespace() == consumer.OPENID2_NS:
            server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', consumer.no_default)
            if server_url.startswith('https://www.google.com/a/'):
                # update fields
                for attr in ['claimed_id', 'identity']:
                    value = message.getArg(consumer.OPENID2_NS, attr)
                    value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(value)
                    message.setArg(consumer.OPENID2_NS, attr, value)

                # now, resign the message
                assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
                assoc = self.store.getAssociation(server_url, assoc_handle)
                message.delArg(consumer.OPENID2_NS, 'sig')
                message.delArg(consumer.OPENID2_NS, 'signed')
                message = assoc.signMessage(message)

        return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)


class OpenIDController(openerpweb.Controller):
    """Web controller driving the OpenID login flow.

    /verify starts discovery and returns either a redirect URL or an
    auto-submitting HTML form; /process handles the provider's response
    and logs the matched user in; /status reports the outcome to the UI.
    """
    _cp_path = '/auth_openid/login'

    # In-memory association/nonce store: lost on restart and not shared
    # between workers — hence the TODO for a file-backed store.
    _store = memstore.MemoryStore()  # TODO use a filestore

    _REQUIRED_ATTRIBUTES = ['email']
    _OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()

    def _add_extensions(self, request):
        """Add extensions to the request"""
        # Ask for the attributes via both SReg and AX: providers support
        # one or the other.
        sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
                                        optional=self._OPTIONAL_ATTRIBUTES)
        request.addExtension(sreg_request)

        ax_request = ax.FetchRequest()
        for alias in self._REQUIRED_ATTRIBUTES:
            uri = utils.SREG2AX[alias]
            ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
        for alias in self._OPTIONAL_ATTRIBUTES:
            uri = utils.SREG2AX[alias]
            ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))

        request.addExtension(ax_request)

    def _get_attributes_from_success_response(self, success_response):
        """Merge SReg and AX attributes from a successful OpenID response.

        AX values overwrite SReg values for the same alias.
        """
        attrs = {}

        all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES

        sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
        if sreg_resp:
            for attr in all_attrs:
                value = sreg_resp.get(attr)
                if value is not None:
                    attrs[attr] = value

        ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
        if ax_resp:
            for attr in all_attrs:
                value = ax_resp.getSingle(utils.SREG2AX[attr])
                if value is not None:
                    attrs[attr] = value
        return attrs

    def _get_realm(self, req):
        # The OpenID realm is the site root the provider shows the user.
        return req.httprequest.host_url

    @openerpweb.jsonrequest
    def verify(self, req, db, url):
        """Start the OpenID flow for identifier `url` against database `db`."""
        redirect_to = werkzeug.urls.Href(req.httprequest.host_url + 'auth_openid/login/process')(session_id=req.session_id)
        realm = self._get_realm(req)

        session = dict(dbname=db, openid_url=url)  # TODO add origin page ?
        oidconsumer = consumer.Consumer(session, self._store)

        try:
            request = oidconsumer.begin(url)
        except consumer.DiscoveryFailure, exc:
            fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
            return {'error': fetch_error_string, 'title': 'OpenID Error'}

        if request is None:
            return {'error': 'No OpenID services found', 'title': 'OpenID Error'}

        # Stash the consumer session so /process can complete the flow.
        req.session.openid_session = session
        self._add_extensions(request)

        # Small requests go as a plain redirect; large ones (with extensions)
        # as an auto-posting HTML form.
        if request.shouldSendRedirect():
            redirect_url = request.redirectURL(realm, redirect_to)
            return {'action': 'redirect', 'value': redirect_url, 'session_id': req.session_id}
        else:
            form_html = request.htmlMarkup(realm, redirect_to)
            return {'action': 'post', 'value': form_html, 'session_id': req.session_id}

    @openerpweb.httprequest
    def process(self, req, **kw):
        """Handle the provider's response: verify it and log the user in."""
        session = getattr(req.session, 'openid_session', None)
        if not session:
            return werkzeug.utils.redirect('/')

        oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)

        query = req.httprequest.args
        info = oidconsumer.complete(query, req.httprequest.base_url)
        display_identifier = info.getDisplayIdentifier()

        session['status'] = info.status

        user_id = None
        if info.status == consumer.SUCCESS:
            dbname = session['dbname']
            with utils.cursor(dbname) as cr:
                registry = RegistryManager.get(dbname)
                Modules = registry.get('ir.module.module')

                # Only proceed if the auth_openid addon is installed in
                # the target database.  NOTE(review): ORM calls below run
                # as uid 1 (superuser) on purpose — no user is logged in yet.
                installed = Modules.search_count(cr, 1, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
                if installed:

                    Users = registry.get('res.users')

                    #openid_url = info.endpoint.canonicalID or display_identifier
                    openid_url = session['openid_url']

                    attrs = self._get_attributes_from_success_response(info)
                    attrs['openid_url'] = openid_url
                    session['attributes'] = attrs
                    openid_email = attrs.get('email', False)

                    # Match on openid_url, and on openid_email when the
                    # provider supplied one (or when the stored email is unset).
                    domain = []
                    if openid_email:
                        domain += ['|', ('openid_email', '=', False)]
                        domain += [('openid_email', '=', openid_email)]
                    domain += [
                        ('openid_url', '=', openid_url),
                        ('active', '=', True),
                    ]

                    ids = Users.search(cr, 1, domain)
                    assert len(ids) < 2
                    if ids:
                        user_id = ids[0]
                        login = Users.browse(cr, 1, user_id).login
                        # One-time random key used as the password for this
                        # login; stored on the user and consumed by the
                        # auth layer.
                        key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
                        Users.write(cr, 1, [user_id], {'openid_key': key})
                        # TODO fill empty fields with the ones from sreg/ax
                        cr.commit()

                        u = req.session.login(dbname, login, key)

            if not user_id:
                session['message'] = 'This OpenID identifier is not associated to any active users'

        elif info.status == consumer.SETUP_NEEDED:
            session['message'] = info.setup_url
        elif info.status == consumer.FAILURE and display_identifier:
            fmt = "Verification of %s failed: %s"
            session['message'] = fmt % (display_identifier, info.message)
        else:   # FAILURE
            # Either we don't understand the code or there is no
            # openid_url included with the error. Give a generic
            # failure message. The library should supply debug
            # information in a log.
            session['message'] = 'Verification failed.'

        fragment = '#loginerror' if not user_id else ''
        return werkzeug.utils.redirect('/'+fragment)

    @openerpweb.jsonrequest
    def status(self, req):
        """Report the outcome of the last OpenID attempt for this session."""
        session = getattr(req.session, 'openid_session', {})
        return {'status': session.get('status'), 'message': session.get('message')}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
amenonsen/ansible
lib/ansible/modules/cloud/google/gcp_compute_instance_group_manager_info.py
5
10526
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_instance_group_manager_info description: - Gather info for GCP InstanceGroupManager - This module was called C(gcp_compute_instance_group_manager_facts) before Ansible 2.9. The usage has not changed. short_description: Gather info for GCP InstanceGroupManager version_added: 2.7 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: filters: description: - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). - Each additional filter in the list will act be added as an AND condition (filter1 and filter2) . type: list zone: description: - The zone the managed instance group resides. 
required: true type: str extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: get info on an instance group manager gcp_compute_instance_group_manager_info: zone: us-west1-a filters: - name = test_object project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' resources: description: List of resources returned: always type: complex contains: baseInstanceName: description: - The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. - The base instance name must comply with RFC1035. returned: success type: str creationTimestamp: description: - The creation timestamp for this managed instance group in RFC3339 text format. returned: success type: str currentActions: description: - The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions. returned: success type: complex contains: abandoning: description: - The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it. returned: success type: int creating: description: - The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully. - If you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated. returned: success type: int creatingWithoutRetries: description: - The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. 
If the group fails to create any of these instances, it decreases the group's targetSize value accordingly. returned: success type: int deleting: description: - The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted. returned: success type: int none: description: - The number of instances in the managed instance group that are running and have no scheduled actions. returned: success type: int recreating: description: - The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. - Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template. returned: success type: int refreshing: description: - The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance. returned: success type: int restarting: description: - The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted. returned: success type: int description: description: - An optional description of this resource. Provide this property when you create the resource. returned: success type: str id: description: - A unique identifier for this resource. returned: success type: int instanceGroup: description: - The instance group being managed. returned: success type: dict instanceTemplate: description: - The instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. returned: success type: dict name: description: - The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035. 
returned: success type: str namedPorts: description: - Named ports configured for the Instance Groups complementary to this Instance Group Manager. returned: success type: complex contains: name: description: - The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. returned: success type: str port: description: - The port number, which can be a value between 1 and 65535. returned: success type: int region: description: - The region this managed instance group resides (for regional resources). returned: success type: str targetPools: description: - TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group. returned: success type: list targetSize: description: - The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number. returned: success type: int zone: description: - The zone the managed instance group resides. 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) if module._name == 'gcp_compute_instance_group_manager_facts': module.deprecate("The 'gcp_compute_instance_group_manager_facts' module has been renamed to 'gcp_compute_instance_group_manager_info'", version='2.13') if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params) def fetch_list(module, link, query): auth = GcpSession(module, 'compute') return auth.list(link, return_if_object, array_name='items', params={'filter': query}) def query_options(filters): if not filters: return '' if len(filters) == 1: return filters[0] else: queries = [] for f in filters: # For multiple queries, all queries should have () if f[0] != '(' and f[-1] != ')': queries.append("(%s)" % ''.join(f)) else: queries.append(f) return ' '.join(queries) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. 
if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
gpl-3.0
tosunkaya/adsbypasser
deploy/mirrors/openuserjs.py
3
1968
import mechanize import requests import urllib from summary import make_summary def exec_(config, edition, another_edition, script): USERNAME = config['USERNAME'] SCRIPTNAME = config[edition]['SCRIPTNAME'] GITHUB_USERNAME = config['GITHUB_USERNAME'] GITHUB_PASSWORD = config['GITHUB_PASSWORD'] HOME_URL = 'https://openuserjs.org' LOGIN_URL = '{0}/register'.format(HOME_URL) SCRIPT_URL = '{0}/user/add/scripts/new'.format(HOME_URL) ABOUT_URL = '{0}/script/{1}/edit'.format(HOME_URL, SCRIPTNAME) URL_PARAM = '/scripts/{0}/{1}/source'.format(USERNAME, SCRIPTNAME) summary = make_summary() another_edition = config[another_edition] another_edition = 'https://openuserjs.org/scripts/{0}/{1}'.format(USERNAME, another_edition['SCRIPTNAME']) summary = summary.getResult(edition, another_edition) b = mechanize.Browser() b.set_handle_robots(False) # home page b.open(LOGIN_URL) b.select_form(nr=0) b['username'] = USERNAME b.submit() # github login b.select_form(nr=1) b['login'] = GITHUB_USERNAME b['password'] = GITHUB_PASSWORD b.submit() # edit source # can not simply use mechanize because the form is generate by javascript jar = b._ua_handlers['_cookies'].cookiejar cookies = {c.name: c.value for c in jar} cookies = { 'connect.sid': urllib.unquote(cookies['connect.sid']), } # somehow the SSL verification will fail r = requests.post(SCRIPT_URL, cookies=cookies, verify=False, data={ 'source': script.encode('utf-8'), 'url': URL_PARAM, }) # edit metadata b.open(ABOUT_URL) b.select_form(nr=0) b.find_control('groups').readonly = False b['about'] = summary.encode('utf-8') b['groups'] = 'ads' b.submit() # ex: ts=4 sts=4 sw=4 et # sublime: tab_size 4; translate_tabs_to_spaces true; detect_indentation false; use_tab_stops true; # kate: space-indent on; indent-width 4;
bsd-2-clause
NeostreamTechnology/Microservices
venv/lib/python2.7/site-packages/werkzeug/wsgi.py
85
42838
# -*- coding: utf-8 -*- """ werkzeug.wsgi ~~~~~~~~~~~~~ This module implements WSGI related helpers. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import os import posixpath import mimetypes from itertools import chain from zlib import adler32 from time import time, mktime from datetime import datetime from functools import partial, update_wrapper from werkzeug._compat import iteritems, text_type, string_types, \ implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \ wsgi_get_bytes, try_coerce_native, PY2, BytesIO from werkzeug._internal import _empty_stream, _encode_idna from werkzeug.http import is_resource_modified, http_date from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join from werkzeug.filesystem import get_filesystem_encoding def responder(f): """Marks a function as responder. Decorate a function with it and it will automatically call the return value as WSGI application. Example:: @responder def application(environ, start_response): return Response('Hello World!') """ return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) def get_current_url(environ, root_only=False, strip_querystring=False, host_only=False, trusted_hosts=None): """A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here's an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' This optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. 
Note that the string returned might contain unicode characters as the representation is an IRI not an URI. If you need an ASCII only representation you can use the :func:`~werkzeug.urls.iri_to_uri` function: >>> from werkzeug.urls import iri_to_uri >>> iri_to_uri(get_current_url(env)) 'http://localhost/script/?param=foo' :param environ: the WSGI environment to get the current URL from. :param root_only: set `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)] cat = tmp.append if host_only: return uri_to_iri(''.join(tmp) + '/') cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/')) cat('/') if not root_only: cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/'))) if not strip_querystring: qs = get_query_string(environ) if qs: cat('?' + qs) return uri_to_iri(''.join(tmp)) def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. """ if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ':' in hostname: hostname = hostname.rsplit(':', 1)[0] return _encode_idna(hostname) try: hostname = _normalize(hostname) except UnicodeError: return False for ref in trusted_list: if ref.startswith('.'): ref = ref[1:] suffix_match = True else: suffix_match = False try: ref = _normalize(ref) except UnicodeError: return False if ref == hostname: return True if suffix_match and hostname.endswith('.' 
+ ref): return True return False def get_host(environ, trusted_hosts=None): """Return the real host for the given WSGI environment. This first checks the `X-Forwarded-Host` header, then the normal `Host` header, and finally the `SERVER_NAME` environment variable (using the first one it finds). Optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. :param environ: the WSGI environment to get the host of. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ if 'HTTP_X_FORWARDED_HOST' in environ: rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip() elif 'HTTP_HOST' in environ: rv = environ['HTTP_HOST'] else: rv = environ['SERVER_NAME'] if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ in (('https', '443'), ('http', '80')): rv += ':' + environ['SERVER_PORT'] if trusted_hosts is not None: if not host_is_trusted(rv, trusted_hosts): from werkzeug.exceptions import SecurityError raise SecurityError('Host "%s" is not trusted' % rv) return rv def get_content_length(environ): """Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from. """ content_length = environ.get('CONTENT_LENGTH') if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass def get_input_stream(environ, safe_fallback=True): """Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. 
:param safe: indicates whether the function should use an empty stream as safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty string in those cases. """ stream = environ['wsgi.input'] content_length = get_content_length(environ) # A wsgi extension that tells us if the input is terminated. In # that case we return the stream unchanged as we know we can safely # read it until the end. if environ.get('wsgi.input_terminated'): return stream # If we don't have a content length we fall back to an empty stream # in case of a safe fallback, otherwise we return the stream unchanged. # The non-safe fallback is not recommended but might be useful in # some situations. if content_length is None: return safe_fallback and _empty_stream or stream # Otherwise limit the stream to the content length return LimitedStream(stream, content_length) def get_query_string(environ): """Returns the `QUERY_STRING` from the WSGI environment. This also takes care about the WSGI decoding dance on Python 3 environments as a native string. The string returned will be restricted to ASCII characters. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the query string from. """ qs = wsgi_get_bytes(environ.get('QUERY_STRING', '')) # QUERY_STRING really should be ascii safe but some browsers # will send us some unicode stuff (I am looking at you IE). # In that case we want to urllib quote it badly. return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),')) def get_path_info(environ, charset='utf-8', errors='replace'): """Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. 
:param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('PATH_INFO', '')) return to_unicode(path, charset, errors, allow_none_charset=True) def get_script_name(environ, charset='utf-8', errors='replace'): """Returns the `SCRIPT_NAME` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('SCRIPT_NAME', '')) return to_unicode(path, charset, errors, allow_none_charset=True) def pop_path_info(environ, charset='utf-8', errors='replace'): """Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. 
""" path = environ.get('PATH_INFO') if not path: return None script_name = environ.get('SCRIPT_NAME', '') # shift multiple leading slashes over old_path = path path = path.lstrip('/') if path != old_path: script_name += '/' * (len(old_path) - len(path)) if '/' not in path: environ['PATH_INFO'] = '' environ['SCRIPT_NAME'] = script_name + path rv = wsgi_get_bytes(path) else: segment, path = path.split('/', 1) environ['PATH_INFO'] = '/' + path environ['SCRIPT_NAME'] = script_name + segment rv = wsgi_get_bytes(segment) return to_unicode(rv, charset, errors, allow_none_charset=True) def peek_path_info(environ, charset='utf-8', errors='replace'): """Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked. """ segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1) if segments: return to_unicode(wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True) def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8', errors='replace', collapse_http_schemes=True): """Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a unicode string, not a bytestring suitable for a WSGI environment. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... 
collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. .. versionadded:: 0.6 :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. Also accepts IRIs and unicode parameters. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource. """ def _normalize_netloc(scheme, netloc): parts = netloc.split(u'@', 1)[-1].split(u':', 1) if len(parts) == 2: netloc, port = parts if (scheme == u'http' and port == u'80') or \ (scheme == u'https' and port == u'443'): port = None else: netloc = parts[0] port = None if port is not None: netloc += u':' + port return netloc # make sure whatever we are working on is a IRI and parse it path = uri_to_iri(path_or_url, charset, errors) if isinstance(environ_or_baseurl, dict): environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True) base_iri = uri_to_iri(environ_or_baseurl, charset, errors) base_scheme, base_netloc, base_path = url_parse(base_iri)[:3] cur_scheme, cur_netloc, cur_path, = \ url_parse(url_join(base_iri, path))[:3] # normalize the network location base_netloc = _normalize_netloc(base_scheme, base_netloc) cur_netloc = _normalize_netloc(cur_scheme, cur_netloc) # is that IRI even on a known HTTP scheme? if collapse_http_schemes: for scheme in base_scheme, cur_scheme: if scheme not in (u'http', u'https'): return None else: if not (base_scheme in (u'http', u'https') and base_scheme == cur_scheme): return None # are the netlocs compatible? if base_netloc != cur_netloc: return None # are we below the application path? 
base_path = base_path.rstrip(u'/') if not cur_path.startswith(base_path): return None return u'/' + cur_path[len(base_path):].lstrip(u'/') class SharedDataMiddleware(object): """A WSGI middleware that provides static content for development environments or simple server setups. Usage is quite simple:: import os from werkzeug.wsgi import SharedDataMiddleware app = SharedDataMiddleware(app, { '/shared': os.path.join(os.path.dirname(__file__), 'shared') }) The contents of the folder ``./shared`` will now be available on ``http://example.com/shared/``. This is pretty useful during development because a standalone media server is not required. One can also mount files on the root folder and still continue to use the application because the shared data middleware forwards all unhandled requests to the application, even if the requests are below one of the shared folders. If `pkg_resources` is available you can also tell the middleware to serve files from package data:: app = SharedDataMiddleware(app, { '/shared': ('myapplication', 'shared_files') }) This will then serve the ``shared_files`` folder in the `myapplication` Python package. The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` rules for files that are not accessible from the web. If `cache` is set to `False` no caching headers are sent. Currently the middleware does not support non ASCII filenames. If the encoding on the file system happens to be the encoding of the URI it may work but this could also be by accident. We strongly suggest using ASCII only file names for static files. The middleware will guess the mimetype using the Python `mimetype` module. If it's unable to figure out the charset it will fall back to `fallback_mimetype`. .. versionchanged:: 0.5 The cache timeout is configurable now. .. versionadded:: 0.6 The `fallback_mimetype` parameter was added. :param app: the application to wrap. If you don't want to wrap an application you can pass it :exc:`NotFound`. 
:param exports: a dict of exported files and folders. :param disallow: a list of :func:`~fnmatch.fnmatch` rules. :param fallback_mimetype: the fallback mimetype for unknown files. :param cache: enable or disable caching headers. :param cache_timeout: the cache timeout in seconds for the headers. """ def __init__(self, app, exports, disallow=None, cache=True, cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'): self.app = app self.exports = {} self.cache = cache self.cache_timeout = cache_timeout for key, value in iteritems(exports): if isinstance(value, tuple): loader = self.get_package_loader(*value) elif isinstance(value, string_types): if os.path.isfile(value): loader = self.get_file_loader(value) else: loader = self.get_directory_loader(value) else: raise TypeError('unknown def %r' % value) self.exports[key] = loader if disallow is not None: from fnmatch import fnmatch self.is_allowed = lambda x: not fnmatch(x, disallow) self.fallback_mimetype = fallback_mimetype def is_allowed(self, filename): """Subclasses can override this method to disallow the access to certain files. However by providing `disallow` in the constructor this method is overwritten. 
""" return True def _opener(self, filename): return lambda: ( open(filename, 'rb'), datetime.utcfromtimestamp(os.path.getmtime(filename)), int(os.path.getsize(filename)) ) def get_file_loader(self, filename): return lambda x: (os.path.basename(filename), self._opener(filename)) def get_package_loader(self, package, package_path): from pkg_resources import DefaultProvider, ResourceManager, \ get_provider loadtime = datetime.utcnow() provider = get_provider(package) manager = ResourceManager() filesystem_bound = isinstance(provider, DefaultProvider) def loader(path): if path is None: return None, None path = posixpath.join(package_path, path) if not provider.has_resource(path): return None, None basename = posixpath.basename(path) if filesystem_bound: return basename, self._opener( provider.get_resource_filename(manager, path)) s = provider.get_resource_string(manager, path) return basename, lambda: ( BytesIO(s), loadtime, len(s) ) return loader def get_directory_loader(self, directory): def loader(path): if path is not None: path = os.path.join(directory, path) else: path = directory if os.path.isfile(path): return os.path.basename(path), self._opener(path) return None, None return loader def generate_etag(self, mtime, file_size, real_filename): if not isinstance(real_filename, bytes): real_filename = real_filename.encode(get_filesystem_encoding()) return 'wzsdm-%d-%s-%s' % ( mktime(mtime.timetuple()), file_size, adler32(real_filename) & 0xffffffff ) def __call__(self, environ, start_response): cleaned_path = get_path_info(environ) if PY2: cleaned_path = cleaned_path.encode(get_filesystem_encoding()) # sanitize the path for non unix systems cleaned_path = cleaned_path.strip('/') for sep in os.sep, os.altsep: if sep and sep != '/': cleaned_path = cleaned_path.replace(sep, '/') path = '/' + '/'.join(x for x in cleaned_path.split('/') if x and x != '..') file_loader = None for search_path, loader in iteritems(self.exports): if search_path == path: real_filename, 
file_loader = loader(None) if file_loader is not None: break if not search_path.endswith('/'): search_path += '/' if path.startswith(search_path): real_filename, file_loader = loader(path[len(search_path):]) if file_loader is not None: break if file_loader is None or not self.is_allowed(real_filename): return self.app(environ, start_response) guessed_type = mimetypes.guess_type(real_filename) mime_type = guessed_type[0] or self.fallback_mimetype f, mtime, file_size = file_loader() headers = [('Date', http_date())] if self.cache: timeout = self.cache_timeout etag = self.generate_etag(mtime, file_size, real_filename) headers += [ ('Etag', '"%s"' % etag), ('Cache-Control', 'max-age=%d, public' % timeout) ] if not is_resource_modified(environ, etag, last_modified=mtime): f.close() start_response('304 Not Modified', headers) return [] headers.append(('Expires', http_date(time() + timeout))) else: headers.append(('Cache-Control', 'public')) headers.extend(( ('Content-Type', mime_type), ('Content-Length', str(file_size)), ('Last-Modified', http_date(mtime)) )) start_response('200 OK', headers) return wrap_file(environ, f) class DispatcherMiddleware(object): """Allows one to mount middlewares or applications in a WSGI application. 
This is useful if you want to combine multiple WSGI applications:: app = DispatcherMiddleware(app, { '/app2': app2, '/app3': app3 }) """ def __init__(self, app, mounts=None): self.app = app self.mounts = mounts or {} def __call__(self, environ, start_response): script = environ.get('PATH_INFO', '') path_info = '' while '/' in script: if script in self.mounts: app = self.mounts[script] break script, last_item = script.rsplit('/', 1) path_info = '/%s%s' % (last_item, path_info) else: app = self.mounts.get(script, self.app) original_script_name = environ.get('SCRIPT_NAME', '') environ['SCRIPT_NAME'] = original_script_name + script environ['PATH_INFO'] = path_info return app(environ, start_response) @implements_iterator class ClosingIterator(object): """The WSGI specification requires that all middlewares and gateways respect the `close` callback of an iterator. Because it is useful to add another close action to a returned iterator and adding a custom iterator is a boring task this class can be used for that:: return ClosingIterator(app(environ, start_response), [cleanup_session, cleanup_locals]) If there is just one close function it can be passed instead of the list. A closing iterator is not needed if the application uses response objects and finishes the processing if the response is started:: try: return response(environ, start_response) finally: cleanup_session() cleanup_locals() """ def __init__(self, iterable, callbacks=None): iterator = iter(iterable) self._next = partial(next, iterator) if callbacks is None: callbacks = [] elif callable(callbacks): callbacks = [callbacks] else: callbacks = list(callbacks) iterable_close = getattr(iterator, 'close', None) if iterable_close: callbacks.insert(0, iterable_close) self._callbacks = callbacks def __iter__(self): return self def __next__(self): return self._next() def close(self): for callback in self._callbacks: callback() def wrap_file(environ, file, buffer_size=8192): """Wraps a file. 
This uses the WSGI server's file wrapper if available or otherwise the generic :class:`FileWrapper`. .. versionadded:: 0.5 If the file wrapper from the WSGI server is used it's important to not iterate over it from inside the application but to pass it through unchanged. If you want to pass out a file wrapper inside a response object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. More information about file wrappers are available in :pep:`333`. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. """ return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size) @implements_iterator class FileWrapper(object): """This class can be used to convert a :class:`file`-like object into an iterable. It yields `buffer_size` blocks until the file is fully read. You should not use this class directly but rather use the :func:`wrap_file` function that uses the WSGI server's file wrapper support if it's available. .. versionadded:: 0.5 If you're using this object together with a :class:`BaseResponse` you have to use the `direct_passthrough` mode. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. """ def __init__(self, file, buffer_size=8192): self.file = file self.buffer_size = buffer_size def close(self): if hasattr(self.file, 'close'): self.file.close() def seekable(self): if hasattr(self.file, 'seekable'): return self.file.seekable() if hasattr(self.file, 'seek'): return True return False def seek(self, *args): if hasattr(self.file, 'seek'): self.file.seek(*args) def tell(self): if hasattr(self.file, 'tell'): return self.file.tell() return None def __iter__(self): return self def __next__(self): data = self.file.read(self.buffer_size) if data: return data raise StopIteration() @implements_iterator class _RangeWrapper(object): # private for now, but should we make it public in the future ? 
"""This class can be used to convert an iterable object into an iterable that will only yield a piece of the underlying content. It yields blocks until the underlying stream range is fully read. The yielded blocks will have a size that can't exceed the original iterator defined block size, but that can be smaller. If you're using this object together with a :class:`BaseResponse` you have to use the `direct_passthrough` mode. :param iterable: an iterable object with a :meth:`__next__` method. :param start_byte: byte from which read will start. :param byte_range: how many bytes to read. """ def __init__(self, iterable, start_byte=0, byte_range=None): self.iterable = iter(iterable) self.byte_range = byte_range self.start_byte = start_byte self.end_byte = None if byte_range is not None: self.end_byte = self.start_byte + self.byte_range self.read_length = 0 self.seekable = hasattr(iterable, 'seekable') and iterable.seekable() self.end_reached = False def __iter__(self): return self def _next_chunk(self): try: chunk = next(self.iterable) self.read_length += len(chunk) return chunk except StopIteration: self.end_reached = True raise def _first_iteration(self): chunk = None if self.seekable: self.iterable.seek(self.start_byte) self.read_length = self.iterable.tell() contextual_read_length = self.read_length else: while self.read_length <= self.start_byte: chunk = self._next_chunk() if chunk is not None: chunk = chunk[self.start_byte - self.read_length:] contextual_read_length = self.start_byte return chunk, contextual_read_length def _next(self): if self.end_reached: raise StopIteration() chunk = None contextual_read_length = self.read_length if self.read_length == 0: chunk, contextual_read_length = self._first_iteration() if chunk is None: chunk = self._next_chunk() if self.end_byte is not None and self.read_length >= self.end_byte: self.end_reached = True return chunk[:self.end_byte - contextual_read_length] return chunk def __next__(self): chunk = self._next() if chunk: 
return chunk self.end_reached = True raise StopIteration() def close(self): if hasattr(self.iterable, 'close'): self.iterable.close() def _make_chunk_iter(stream, limit, buffer_size): """Helper for the line and chunk iter functions.""" if isinstance(stream, (bytes, bytearray, text_type)): raise TypeError('Passed a string or byte object instead of ' 'true iterator or stream.') if not hasattr(stream, 'read'): for item in stream: if item: yield item return if not isinstance(stream, LimitedStream) and limit is not None: stream = LimitedStream(stream, limit) _read = stream.read while 1: item = _read(buffer_size) if not item: break yield item def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False): """Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposite to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterate to iterate over. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is a :class:`LimitedStream`. :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. Internally this is implemented that the buffer size might be exhausted by a factor of two however. 
""" _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, '') if not first_item: return s = make_literal_wrapper(first_item) empty = s('') cr = s('\r') lf = s('\n') crlf = s('\r\n') _iter = chain((first_item,), _iter) def _iter_basic_lines(): _join = empty.join buffer = [] while 1: new_data = next(_iter, '') if not new_data: break new_buf = [] buf_size = 0 for item in chain(buffer, new_data.splitlines(True)): new_buf.append(item) buf_size += len(item) if item and item[-1:] in crlf: yield _join(new_buf) new_buf = [] elif cap_at_buffer and buf_size >= buffer_size: rv = _join(new_buf) while len(rv) >= buffer_size: yield rv[:buffer_size] rv = rv[buffer_size:] new_buf = [rv] buffer = new_buf if buffer: yield _join(buffer) # This hackery is necessary to merge 'foo\r' and '\n' into one item # of 'foo\r\n' if we were unlucky and we hit a chunk boundary. previous = empty for item in _iter_basic_lines(): if item == lf and previous[-1:] == cr: previous += item item = empty if previous: yield previous previous = item if previous: yield previous def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False): """Works like :func:`make_line_iter` but accepts a separator which divides chunks. If you want newline based processing you should use :func:`make_line_iter` instead as it supports arbitrary newline markers. .. versionadded:: 0.8 .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterate to iterate over. :param separator: the separator that divides chunks. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is otherwise already limited). :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. 
Internally this is implemented that the buffer size might be exhausted by a factor of two however. """ _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, '') if not first_item: return _iter = chain((first_item,), _iter) if isinstance(first_item, text_type): separator = to_unicode(separator) _split = re.compile(r'(%s)' % re.escape(separator)).split _join = u''.join else: separator = to_bytes(separator) _split = re.compile(b'(' + re.escape(separator) + b')').split _join = b''.join buffer = [] while 1: new_data = next(_iter, '') if not new_data: break chunks = _split(new_data) new_buf = [] buf_size = 0 for item in chain(buffer, chunks): if item == separator: yield _join(new_buf) new_buf = [] buf_size = 0 else: buf_size += len(item) new_buf.append(item) if cap_at_buffer and buf_size >= buffer_size: rv = _join(new_buf) while len(rv) >= buffer_size: yield rv[:buffer_size] rv = rv[buffer_size:] new_buf = [rv] buf_size = len(rv) buffer = new_buf if buffer: yield _join(buffer) @implements_iterator class LimitedStream(object): """Wraps a stream so that it doesn't read more than n bytes. If the stream is exhausted and the caller tries to get more bytes from it :func:`on_exhausted` is called which by default returns an empty string. The return value of that function is forwarded to the reader function. So if it returns an empty string :meth:`read` will return an empty string as well. The limit however must never be higher than what the stream can output. Otherwise :meth:`readlines` will try to read past the limit. .. admonition:: Note on WSGI compliance calls to :meth:`readline` and :meth:`readlines` are not WSGI compliant because it passes a size argument to the readline methods. Unfortunately the WSGI PEP is not safely implementable without a size argument to :meth:`readline` because there is no EOF marker in the stream. As a result of that the use of :meth:`readline` is discouraged. 
For the same reason iterating over the :class:`LimitedStream` is not portable. It internally calls :meth:`readline`. We strongly suggest using :meth:`read` only or using the :func:`make_line_iter` which safely iterates line-based over a WSGI input stream. :param stream: the stream to wrap. :param limit: the limit for the stream, must not be longer than what the string can provide if the stream does not end with `EOF` (like `wsgi.input`) """ def __init__(self, stream, limit): self._read = stream.read self._readline = stream.readline self._pos = 0 self.limit = limit def __iter__(self): return self @property def is_exhausted(self): """If the stream is exhausted this attribute is `True`.""" return self._pos >= self.limit def on_exhausted(self): """This is called when the stream tries to read past the limit. The return value of this function is returned from the reading function. """ # Read null bytes from the stream so that we get the # correct end of stream marker. return self._read(0) def on_disconnect(self): """What should happen if a disconnect is detected? The return value of this function is returned from read functions in case the client went away. By default a :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised. """ from werkzeug.exceptions import ClientDisconnected raise ClientDisconnected() def exhaust(self, chunk_size=1024 * 64): """Exhaust the stream. This consumes all the data left until the limit is reached. :param chunk_size: the size for a chunk. It will read the chunk until the stream is exhausted and throw away the results. """ to_read = self.limit - self._pos chunk = chunk_size while to_read > 0: chunk = min(to_read, chunk) self.read(chunk) to_read -= chunk def read(self, size=None): """Read `size` bytes or if size is not provided everything is read. :param size: the number of bytes read. 
""" if self._pos >= self.limit: return self.on_exhausted() if size is None or size == -1: # -1 is for consistence with file size = self.limit to_read = min(self.limit - self._pos, size) try: read = self._read(to_read) except (IOError, ValueError): return self.on_disconnect() if to_read and len(read) != to_read: return self.on_disconnect() self._pos += len(read) return read def readline(self, size=None): """Reads one line from the stream.""" if self._pos >= self.limit: return self.on_exhausted() if size is None: size = self.limit - self._pos else: size = min(size, self.limit - self._pos) try: line = self._readline(size) except (ValueError, IOError): return self.on_disconnect() if size and not line: return self.on_disconnect() self._pos += len(line) return line def readlines(self, size=None): """Reads a file into a list of strings. It calls :meth:`readline` until the file is read to the end. It does support the optional `size` argument if the underlaying stream supports it for `readline`. """ last_pos = self._pos result = [] if size is not None: end = min(self.limit, last_pos + size) else: end = self.limit while 1: if size is not None: size -= last_pos - self._pos if self._pos >= end: break result.append(self.readline(size)) if size is not None: last_pos = self._pos return result def tell(self): """Returns the position of the stream. .. versionadded:: 0.9 """ return self._pos def __next__(self): line = self.readline() if not line: raise StopIteration() return line
mit
stevedevelope17/Pizzerie
node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py
1509
17165
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Handle version information related to Visual Stuio.""" import errno import os import re import subprocess import sys import gyp import glob class VisualStudioVersion(object): """Information regarding a version of Visual Studio.""" def __init__(self, short_name, description, solution_version, project_version, flat_sln, uses_vcxproj, path, sdk_based, default_toolset=None): self.short_name = short_name self.description = description self.solution_version = solution_version self.project_version = project_version self.flat_sln = flat_sln self.uses_vcxproj = uses_vcxproj self.path = path self.sdk_based = sdk_based self.default_toolset = default_toolset def ShortName(self): return self.short_name def Description(self): """Get the full description of the version.""" return self.description def SolutionVersion(self): """Get the version number of the sln files.""" return self.solution_version def ProjectVersion(self): """Get the version number of the vcproj or vcxproj files.""" return self.project_version def FlatSolution(self): return self.flat_sln def UsesVcxproj(self): """Returns true if this version uses a vcxproj file.""" return self.uses_vcxproj def ProjectExtension(self): """Returns the file extension for the project.""" return self.uses_vcxproj and '.vcxproj' or '.vcproj' def Path(self): """Returns the path to Visual Studio installation.""" return self.path def ToolPath(self, tool): """Returns the path to a given compiler tool. 
""" return os.path.normpath(os.path.join(self.path, "VC/bin", tool)) def DefaultToolset(self): """Returns the msbuild toolset version that will be used in the absence of a user override.""" return self.default_toolset def SetupScript(self, target_arch): """Returns a command (with arguments) to be used to set up the environment.""" # Check if we are running in the SDK command line environment and use # the setup script from the SDK if so. |target_arch| should be either # 'x86' or 'x64'. assert target_arch in ('x86', 'x64') sdk_dir = os.environ.get('WindowsSDKDir') if self.sdk_based and sdk_dir: return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')), '/' + target_arch] else: # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls # vcvars32, which it can only find if VS??COMNTOOLS is set, which it # isn't always. if target_arch == 'x86': if self.short_name >= '2013' and self.short_name[-1] != 'e' and ( os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): # VS2013 and later, non-Express have a x64-x86 cross that we want # to prefer. return [os.path.normpath( os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86'] # Otherwise, the standard x86 compiler. return [os.path.normpath( os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))] else: assert target_arch == 'x64' arg = 'x86_amd64' # Use the 64-on-64 compiler if we're not using an express # edition and we're running on a 64bit OS. if self.short_name[-1] != 'e' and ( os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): arg = 'amd64' return [os.path.normpath( os.path.join(self.path, 'VC/vcvarsall.bat')), arg] def _RegistryQueryBase(sysdir, key, value): """Use reg.exe to read a particular key. While ideally we might use the win32 module, we would like gyp to be python neutral, so for instance cygwin python lacks this module. 
Arguments: sysdir: The system subdirectory to attempt to launch reg.exe from. key: The registry key to read from. value: The particular value to read. Return: stdout from reg.exe, or None for failure. """ # Skip if not on Windows or Python Win32 setup issue if sys.platform not in ('win32', 'cygwin'): return None # Setup params to pass to and attempt to launch reg.exe cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'), 'query', key] if value: cmd.extend(['/v', value]) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid # Note that the error text may be in [1] in some cases text = p.communicate()[0] # Check return code from reg.exe; officially 0==success and 1==error if p.returncode: return None return text def _RegistryQuery(key, value=None): r"""Use reg.exe to read a particular key through _RegistryQueryBase. First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If that fails, it falls back to System32. Sysnative is available on Vista and up and available on Windows Server 2003 and XP through KB patch 942589. Note that Sysnative will always fail if using 64-bit python due to it being a virtual directory and System32 will work correctly in the first place. KB 942589 - http://support.microsoft.com/kb/942589/en-us. Arguments: key: The registry key. value: The particular registry value to read (optional). Return: stdout from reg.exe, or None for failure. """ text = None try: text = _RegistryQueryBase('Sysnative', key, value) except OSError, e: if e.errno == errno.ENOENT: text = _RegistryQueryBase('System32', key, value) else: raise return text def _RegistryGetValueUsingWinReg(key, value): """Use the _winreg module to obtain the value of a registry key. Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure. 
Throws ImportError if _winreg is unavailable. """ import _winreg try: root, subkey = key.split('\\', 1) assert root == 'HKLM' # Only need HKLM for now. with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey: return _winreg.QueryValueEx(hkey, value)[0] except WindowsError: return None def _RegistryGetValue(key, value): """Use _winreg or reg.exe to obtain the value of a registry key. Using _winreg is preferable because it solves an issue on some corporate environments where access to reg.exe is locked down. However, we still need to fallback to reg.exe for the case where the _winreg module is not available (for example in cygwin python). Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure. """ try: return _RegistryGetValueUsingWinReg(key, value) except ImportError: pass # Fallback to reg.exe if we fail to import _winreg. text = _RegistryQuery(key, value) if not text: return None # Extract value. match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text) if not match: return None return match.group(1) def _CreateVersion(name, path, sdk_based=False): """Sets up MSVS project generation. Setup is based off the GYP_MSVS_VERSION environment variable or whatever is autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is passed in that doesn't match a value in versions python will throw a error. 
""" if path: path = os.path.normpath(path) versions = { '2015': VisualStudioVersion('2015', 'Visual Studio 2015', solution_version='12.00', project_version='14.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v140'), '2013': VisualStudioVersion('2013', 'Visual Studio 2013', solution_version='13.00', project_version='12.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v120'), '2013e': VisualStudioVersion('2013e', 'Visual Studio 2013', solution_version='13.00', project_version='12.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v120'), '2012': VisualStudioVersion('2012', 'Visual Studio 2012', solution_version='12.00', project_version='4.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v110'), '2012e': VisualStudioVersion('2012e', 'Visual Studio 2012', solution_version='12.00', project_version='4.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v110'), '2010': VisualStudioVersion('2010', 'Visual Studio 2010', solution_version='11.00', project_version='4.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based), '2010e': VisualStudioVersion('2010e', 'Visual C++ Express 2010', solution_version='11.00', project_version='4.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based), '2008': VisualStudioVersion('2008', 'Visual Studio 2008', solution_version='10.00', project_version='9.00', flat_sln=False, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2008e': VisualStudioVersion('2008e', 'Visual Studio 2008', solution_version='10.00', project_version='9.00', flat_sln=True, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2005': VisualStudioVersion('2005', 'Visual Studio 2005', solution_version='9.00', project_version='8.00', flat_sln=False, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2005e': VisualStudioVersion('2005e', 'Visual Studio 
2005', solution_version='9.00', project_version='8.00', flat_sln=True, uses_vcxproj=False, path=path, sdk_based=sdk_based), } return versions[str(name)] def _ConvertToCygpath(path): """Convert to cygwin path if we are using cygwin.""" if sys.platform == 'cygwin': p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE) path = p.communicate()[0].strip() return path def _DetectVisualStudioVersions(versions_to_check, force_express): """Collect the list of installed visual studio versions. Returns: A list of visual studio versions installed in descending order of usage preference. Base this on the registry and a quick check if devenv.exe exists. Only versions 8-10 are considered. Possibilities are: 2005(e) - Visual Studio 2005 (8) 2008(e) - Visual Studio 2008 (9) 2010(e) - Visual Studio 2010 (10) 2012(e) - Visual Studio 2012 (11) 2013(e) - Visual Studio 2013 (12) 2015 - Visual Studio 2015 (14) Where (e) is e for express editions of MSVS and blank otherwise. """ version_to_year = { '8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012', '12.0': '2013', '14.0': '2015', } versions = [] for version in versions_to_check: # Old method of searching for which VS version is installed # We don't use the 2010-encouraged-way because we also want to get the # path to the binaries, which it doesn't offer. keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Microsoft\VCExpress\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version] for index in range(len(keys)): path = _RegistryGetValue(keys[index], 'InstallDir') if not path: continue path = _ConvertToCygpath(path) # Check for full. full_path = os.path.join(path, 'devenv.exe') express_path = os.path.join(path, '*express.exe') if not force_express and os.path.exists(full_path): # Add this one. versions.append(_CreateVersion(version_to_year[version], os.path.join(path, '..', '..'))) # Check for express. 
elif glob.glob(express_path): # Add this one. versions.append(_CreateVersion(version_to_year[version] + 'e', os.path.join(path, '..', '..'))) # The old method above does not work when only SDK is installed. keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7', r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'] for index in range(len(keys)): path = _RegistryGetValue(keys[index], version) if not path: continue path = _ConvertToCygpath(path) if version != '14.0': # There is no Express edition for 2015. versions.append(_CreateVersion(version_to_year[version] + 'e', os.path.join(path, '..'), sdk_based=True)) return versions def SelectVisualStudioVersion(version='auto', allow_fallback=True): """Select which version of Visual Studio projects to generate. Arguments: version: Hook to allow caller to force a particular version (vs auto). Returns: An object representing a visual studio project format version. """ # In auto mode, check environment variable for override. if version == 'auto': version = os.environ.get('GYP_MSVS_VERSION', 'auto') version_map = { 'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'), '2005': ('8.0',), '2005e': ('8.0',), '2008': ('9.0',), '2008e': ('9.0',), '2010': ('10.0',), '2010e': ('10.0',), '2012': ('11.0',), '2012e': ('11.0',), '2013': ('12.0',), '2013e': ('12.0',), '2015': ('14.0',), } override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH') if override_path: msvs_version = os.environ.get('GYP_MSVS_VERSION') if not msvs_version: raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be ' 'set to a particular version (e.g. 
2010e).') return _CreateVersion(msvs_version, override_path, sdk_based=True) version = str(version) versions = _DetectVisualStudioVersions(version_map[version], 'e' in version) if not versions: if not allow_fallback: raise ValueError('Could not locate Visual Studio installation.') if version == 'auto': # Default to 2005 if we couldn't find anything return _CreateVersion('2005', None) else: return _CreateVersion(version, None) return versions[0]
mit
Scalr/libcloud
libcloud/utils/publickey.py
1
2711
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import hashlib from libcloud.utils.py3 import hexadigits from libcloud.utils.py3 import b __all__ = [ 'get_pubkey_openssh_fingerprint', 'get_pubkey_ssh2_fingerprint', 'get_pubkey_comment' ] try: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization cryptography_available = True except ImportError: cryptography_available = False def _to_md5_fingerprint(data): hashed = hashlib.md5(data).digest() return ":".join(hexadigits(hashed)) def get_pubkey_openssh_fingerprint(pubkey): # We import and export the key to make sure it is in OpenSSH format if not cryptography_available: raise RuntimeError('cryptography is not available') public_key = serialization.load_ssh_public_key( b(pubkey), backend=default_backend() ) pub_openssh = public_key.public_bytes( encoding=serialization.Encoding.OpenSSH, format=serialization.PublicFormat.OpenSSH, )[7:] # strip ssh-rsa prefix return _to_md5_fingerprint(base64.decodestring(pub_openssh)) def get_pubkey_ssh2_fingerprint(pubkey): # This is the format that EC2 shows for public key fingerprints in its # KeyPair mgmt API if not cryptography_available: raise RuntimeError('cryptography is not available') 
public_key = serialization.load_ssh_public_key( b(pubkey), backend=default_backend() ) pub_der = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo, ) return _to_md5_fingerprint(pub_der) def get_pubkey_comment(pubkey, default=None): if pubkey.startswith("ssh-"): # This is probably an OpenSSH key return pubkey.strip().split(' ', 3)[2] if default: return default raise ValueError('Public key is not in a supported format')
apache-2.0
ichuang/sympy
sympy/polys/tests/test_distributedpolys.py
2
7404
"""Tests for sparse distributed polynomials. """ from sympy.polys.distributedpolys import ( sdp_LC, sdp_LM, sdp_LT, sdp_del_LT, sdp_coeffs, sdp_monoms, sdp_sort, sdp_strip, sdp_normal, sdp_from_dict, sdp_to_dict, sdp_indep_p, sdp_one_p, sdp_one, sdp_term_p, sdp_abs, sdp_neg, sdp_add_term, sdp_sub_term, sdp_mul_term, sdp_add, sdp_sub, sdp_mul, sdp_sqr, sdp_pow, sdp_monic, sdp_content, sdp_primitive, _term_rr_div, _term_ff_div, sdp_div, sdp_quo, sdp_rem, sdp_lcm, sdp_gcd, ) from sympy.polys.monomialtools import ( lex, grlex, grevlex, ) from sympy.polys.polyerrors import ( ExactQuotientFailed, DomainError, ) from sympy.polys.domains import ZZ, QQ from sympy import S, Symbol, symbols from sympy.utilities.pytest import raises, skip, XFAIL def test_sdp_LC(): assert sdp_LC([], QQ) == QQ(0) assert sdp_LC([((1,0), QQ(1,2))], QQ) == QQ(1,2) assert sdp_LC([((1,1), QQ(1,4)), ((1,0), QQ(1,2))], QQ) == QQ(1,4) def test_sdp_LM(): assert sdp_LM([], 1) == (0, 0) assert sdp_LM([((1,0), QQ(1,2))], 1) == (1, 0) assert sdp_LM([((1,1), QQ(1,4)), ((1,0), QQ(1,2))], 1) == (1, 1) def test_sdp_LT(): assert sdp_LT([], 1, QQ) == ((0, 0), QQ(0)) assert sdp_LT([((1,0), QQ(1,2))], 1, QQ) == ((1, 0), QQ(1,2)) assert sdp_LT([((1,1), QQ(1,4)), ((1,0), QQ(1,2))], 1, QQ) == ((1, 1), QQ(1,4)) def test_sdp_del_LT(): assert sdp_del_LT([]) == [] assert sdp_del_LT([((1,0), QQ(1,2))]) == [] assert sdp_del_LT([((1,1), QQ(1,4)), ((1,0), QQ(1,2))]) == [((1,0), QQ(1,2))] def test_sdp_coeffs(): assert sdp_coeffs([]) == [] assert sdp_coeffs([((1,0), QQ(1,2))]) == [QQ(1,2)] assert sdp_coeffs([((1,1), QQ(1,4)), ((1,0), QQ(1,2))]) == [QQ(1,4), QQ(1,2)] def test_sdp_monoms(): assert sdp_monoms([]) == [] assert sdp_monoms([((1,0), QQ(1,2))]) == [(1,0)] assert sdp_monoms([((1,1), QQ(1,4)), ((1,0), QQ(1,2))]) == [(1,1), (1,0)] def test_sdp_sort(): pass def test_sdp_strip(): assert sdp_strip([((2,2), 0), ((1,1), 1), ((0,0), 0)]) == [((1,1), 1)] def test_sdp_normal(): pass def test_sdp_from_dict(): pass def 
test_sdp_indep_p(): pass def test_sdp_one_p(): pass def test_sdp_one(): pass def test_sdp_term_p(): pass def test_sdp_abs(): pass def test_sdp_neg(): pass def test_sdp_add_term(): pass def test_sdp_sub_term(): pass def test_sdp_mul_term(): pass def test_sdp_add(): pass def test_sdp_sub(): pass def test_sdp_mul(): pass def test_sdp_sqr(): pass def test_sdp_pow(): f = sdp_from_dict({(1,): 2, (0,): 3}, grlex) assert sdp_pow(f, 0, 0, grlex, ZZ) == sdp_one(0, ZZ) assert sdp_pow(f, 1, 0, grlex, ZZ) == f assert sdp_pow(f, 2, 0, grlex, ZZ) == \ sdp_from_dict({(2,): 4, (1,): 12, (0,): 9}, grlex) assert sdp_pow(f, 3, 0, grlex, ZZ) == \ sdp_from_dict({(3,): 8, (2,): 36, (1,): 54, (0,): 27}, grlex) assert sdp_pow(f, 4, 0, grlex, ZZ) == \ sdp_from_dict({(4,): 16, (3,): 96, (2,): 216, (1,): 216, (0,): 81}, grlex) assert sdp_pow(f, 5, 0, grlex, ZZ) == \ sdp_from_dict({(5,): 32, (4,): 240, (3,): 720, (2,): 1080, (1,): 810, (0,): 243}, grlex) f = sdp_from_dict({(3,1,0): 1, (1,2,0): -2, (0,0,1): -3, (0,0,0): 1}, grlex) g = sdp_from_dict({(6,2,0): 1, (4,3,0): -4, (2,4,0): 4, (3,1,1): -6, (3,1,0): 2, (1,2,1): 12, (1,2,0): -4, (0,0,2): 9, (0,0,1): -6, (0,0,0): 1}, grlex) assert sdp_pow(f, 2, 2, grlex, ZZ) == g raises(ValueError, "sdp_pow(f, -2, 2, grlex, ZZ)") def test_sdp_monic(): pass def test_sdp_content(): pass def test_sdp_primitive(): pass def test_sdp_div(): f = sdp_from_dict({(2,1): 4, (1,1): -2, (1,0): 4, (0,1): -2, (0,0): 8}, grlex) assert sdp_div(f, [sdp_from_dict({(0,0): 2}, grlex)], 1, grlex, ZZ) == \ ([sdp_from_dict({(2,1): 2, (1,1): -1, (1,0): 2, (0,1): -1, (0,0): 4}, grlex)], []) assert sdp_div(f, [sdp_from_dict({(0,1): 2}, grlex)], 1, grlex, ZZ) == \ ([sdp_from_dict({(2,0): 2, (1,0): -1, (0,0): -1}, grlex)], sdp_from_dict({(1,0): 4, (0,0): 8}, grlex)) f = sdp_from_dict({(1,0): 1, (0,0): -1}, grlex) g = sdp_from_dict({(0,1): 1, (0,0): -1}, grlex) assert sdp_div(f, [g], 1, grlex, ZZ) == ([[]], f) f = sdp_from_dict({(3,): 1, (2,): -12, (0,): -42}, grlex) g = 
sdp_from_dict({(1,): 1, (0,): -3}, grlex) q = sdp_from_dict({(2,): 1, (1,): -9, (0,): -27}, grlex) r = sdp_from_dict({(0,): -123}, grlex) assert sdp_div(f, [g], 0, grlex, ZZ) == ([q], r) f = sdp_from_dict({(2,): QQ(1), (1,): QQ(2), (0,): QQ(2)}, grlex) g = sdp_from_dict({(0,): QQ(1)}, grlex) h = sdp_from_dict({(0,): QQ(2)}, grlex) q = sdp_from_dict({(2,): QQ(1,2), (1,): QQ(1), (0,): QQ(1)}, grlex) assert sdp_div(f, [g], 0, grlex, QQ) == ([f], []) assert sdp_div(f, [h], 0, grlex, QQ) == ([q], []) f = sdp_from_dict({(1,2): 1, (0,0): 1}, grlex) G = [sdp_from_dict({(1,1): 1, (0,0): 1}, grlex), sdp_from_dict({(0,1): 1, (0,0): 1}, grlex)] Q = [sdp_from_dict({(0,1): 1}, grlex), sdp_from_dict({(0,0): -1}, grlex)] r = sdp_from_dict({(0,0): 2}, grlex) assert sdp_div(f, G, 1, grlex, ZZ) == (Q, r) f = sdp_from_dict({(2,1): 1, (1,2): 1, (0,2): 1}, grlex) G = [sdp_from_dict({(1,1): 1, (0,0): -1}, grlex), sdp_from_dict({(0,2): 1, (0,0): -1}, grlex)] Q = [sdp_from_dict({(1,0): 1, (0,1): 1}, grlex), sdp_from_dict({(0,0): 1}, grlex)] r = sdp_from_dict({(1,0): 1, (0,1): 1, (0,0): 1}, grlex) assert sdp_div(f, G, 1, grlex, ZZ) == (Q, r) G = [sdp_from_dict({(0,2): 1, (0,0): -1}, grlex), sdp_from_dict({(1,1): 1, (0,0): -1}, grlex)] Q = [sdp_from_dict({(1,0): 1, (0,0): 1}, grlex), sdp_from_dict({(1,0): 1}, grlex)] r = sdp_from_dict({(1,0): 2, (0,0): 1}, grlex) assert sdp_div(f, G, 1, grlex, ZZ) == (Q, r) def test_sdp_rem(): f = sdp_from_dict({(2,1): 4, (1,1): -2, (1,0): 4, (0,1): -2, (0,0): 8}, grlex) assert sdp_rem(f, [sdp_from_dict({(0,0): 2}, grlex)], 1, grlex, ZZ) == [] assert sdp_rem(f, [sdp_from_dict({(0,1): 2}, grlex)], 1, grlex, ZZ) == \ sdp_from_dict({(1,0): 4, (0,0): 8}, grlex) f = sdp_from_dict({(1,0): 1, (0,0): -1}, grlex) g = sdp_from_dict({(0,1): 1, (0,0): -1}, grlex) assert sdp_rem(f, [g], 1, grlex, ZZ) == f f = sdp_from_dict({(3,): 1, (2,): -12, (0,): -42}, grlex) g = sdp_from_dict({(1,): 1, (0,): -3}, grlex) r = sdp_from_dict({(0,): -123}, grlex) assert sdp_rem(f, [g], 0, 
grlex, ZZ) == r f = sdp_from_dict({(1,2): 1, (0,0): 1}, grlex) G = [sdp_from_dict({(1,1): 1, (0,0): 1}, grlex), sdp_from_dict({(0,1): 1, (0,0): 1}, grlex)] r = sdp_from_dict({(0,0): 2}, grlex) assert sdp_rem(f, G, 1, grlex, ZZ) == r f = sdp_from_dict({(2,1): 1, (1,2): 1, (0,2): 1}, grlex) G = [sdp_from_dict({(1,1): 1, (0,0): -1}, grlex), sdp_from_dict({(0,2): 1, (0,0): -1}, grlex)] r = sdp_from_dict({(1,0): 1, (0,1): 1, (0,0): 1}, grlex) assert sdp_rem(f, G, 1, grlex, ZZ) == r G = [sdp_from_dict({(0,2): 1, (0,0): -1}, grlex), sdp_from_dict({(1,1): 1, (0,0): -1}, grlex)] r = sdp_from_dict({(1,0): 2, (0,0): 1}, grlex) assert sdp_rem(f, G, 1, grlex, ZZ) == r def test_sdp_lcm(): pass def test_sdp_gcd(): pass
bsd-3-clause
timothyclemans/checklistsforglass
checklistsforglass/urls.py
1
2844
from django.conf.urls import patterns, include, url # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() from .views import AuthComplete, LoginError urlpatterns = patterns('', # Examples: url(r'^$', 'checklistsforglass.views.home', name='home'), url(r'^edit_checklist/(?P<checklist_id>\d+)/$', 'checklistsforglass.views.edit_checklist', name='edit_checklist'), url(r'^audit_trail/(?P<data_id>\d+)/$', 'checklistsforglass.views.audit_trail', name='audit_trail'), url(r'^audit_trails/$', 'checklistsforglass.views.audit_trails', name='audit_trails'), url(r'^delete_checklist/(?P<checklist_id>\d+)/$', 'checklistsforglass.views.delete_checklist', name='delete_checklist'), url(r'^delete_checklistV2/(?P<checklist_id>\d+)/$', 'checklistsforglass.views.delete_checklistV2', name='delete_checklistV2'), url(r'^create_checklist/$', 'checklistsforglass.views.create_checklist', name='create_checklist'), url(r'^edit_checklistV2/(?P<checklist_id>\d+)/$', 'checklistsforglass.views.edit_checklistV2', name='edit_checklistV2'), url(r'^export/$', 'checklistsforglass.views.export', name='export'), url(r'^create_checklistV2/$', 'checklistsforglass.views.create_checklistV2', name='create_checklistV2'), url(r'^edit_checklistV2/(?P<checklist_id>\d+)/save_image/$', 'checklistsforglass.views.save_image', name='save_image'), # url(r'^checklistsforglass/', include('checklistsforglass.foo.urls')), url(r'^is_registered/(?P<serial_number>[\w\d]+)/$', 'checklistsforglass.views.is_registered'), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^install/$', 'checklistsforglass.views.install'), url(r'^unregistered_devices/$', 'checklistsforglass.views.unregistered_devices'), url(r'^register_device/(?P<serial_number>[\w\d]+)/$', 'checklistsforglass.views.register_device'), url(r'^get_users_checklists/(?P<user_id>\d+)/$', 'checklistsforglass.views.get_users_checklists'), 
url(r'^get_all_checklists/$', 'checklistsforglass.views.get_all_checklists'), url(r'^get_checklists/(?P<serial_number>[\w\d]+)/$', 'checklistsforglass.views.get_checklists'), url(r'^get_full_json/$', 'checklistsforglass.views.get_full_json'), url(r'^get_full_checklist/(?P<checklist_id>\d+)/$', 'checklistsforglass.views.get_full_checklist'), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^save_data/$', 'checklistsforglass.views.save_data'), url(r'^complete/(?P<backend>[^/]+)/$', AuthComplete.as_view()), url(r'^login-error/$', LoginError.as_view()), url(r'', include('social_auth.urls')), url(r'^logout$', 'django.contrib.auth.views.logout', {'next_page': '/'}), )
apache-2.0
mastizada/kuma
vendor/packages/ipython/IPython/Extensions/ipy_rehashdir.py
8
4425
# -*- coding: utf-8 -*-
""" IPython extension: add %rehashdir magic

Usage:

%rehashdir c:/bin c:/tools
  - Add all executables under c:/bin and c:/tools to alias table, in order
    to make them directly executable from any directory.

This also serves as an example on how to extend ipython
with new magic functions.

Unlike rest of ipython, this requires Python 2.4 (optional
extensions are allowed to do that).
"""

# NOTE: this is legacy Python 2 code (print statements, list-returning
# map()) written against the pre-0.11 IPython extension API.
import IPython.ipapi
ip = IPython.ipapi.get()

import os,re,fnmatch,sys

def selflaunch(ip,line):
    """ Launch python script with 'this' interpreter

    e.g. d:\foo\ipykit.exe a.py

    With no script argument, spawns a nested interpreter session
    instead.
    """

    tup = line.split(None,1)
    if len(tup) == 1:
        print "Launching nested ipython session"
        os.system(sys.executable)
        return

    cmd = sys.executable + ' ' + tup[1]
    print ">",cmd
    os.system(cmd)

class PyLauncher:
    """ Invoke selflanucher on the specified script

    This is mostly useful for associating with scripts using::
        _ip.defalias('foo',PyLauncher('foo_script.py'))

    """
    def __init__(self,script):
        # Resolve immediately so the alias keeps working after cwd changes.
        self.script = os.path.abspath(script)
    def __call__(self, ip, line):
        if self.script.endswith('.ipy'):
            # .ipy files are IPython scripts: run them in-process.
            ip.runlines(open(self.script).read())
        else:
            # first word is the script/alias name itself, strip it
            tup = line.split(None,1)
            if len(tup) == 2:
                tail = ' ' + tup[1]
            else:
                tail = ''

            selflaunch(ip,"py " + self.script + tail)
    def __repr__(self):
        return 'PyLauncher("%s")' % self.script

def rehashdir_f(self,arg):
    """ Add executables in all specified dirs to alias table

    Usage:

    %rehashdir c:/bin;c:/tools
      - Add all executables under c:/bin and c:/tools to alias table, in order
        to make them directly executable from any directory.

        Without arguments, add all executables in current directory.

    Returns the list of alias names created.  Note that directories are
    separated by ';' (not the %rehashdir space syntax shown in the
    module docstring).
    """
    # most of the code copied from Magic.magic_rehashx
    def isjunk(fname):
        # Skip editor backup files and similar (patterns in `junk`).
        junk = ['*~']
        for j in junk:
            if fnmatch.fnmatch(fname, j):
                return True
        return False

    created = []
    if not arg:
        arg = '.'
    # Py2 map() returns a list of absolute directory paths.
    path = map(os.path.abspath,arg.split(';'))
    alias_table = self.shell.alias_table

    if os.name == 'posix':
        # POSIX: executable bit decides.
        isexec = lambda fname:os.path.isfile(fname) and \
                 os.access(fname,os.X_OK)
    else:
        # Windows: executability is determined by extension (PATHEXT),
        # with a sensible fallback; 'py' is always treated as runnable.
        try:
            winext = os.environ['pathext'].replace(';','|').replace('.','')
        except KeyError:
            winext = 'exe|com|bat|py'
        if 'py' not in winext:
            winext += '|py'

        execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
        isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
    savedir = os.getcwd()
    try:
        # write the whole loop for posix/Windows so we don't have an if in
        # the innermost part
        if os.name == 'posix':
            for pdir in path:
                # chdir so os.path.abspath(ff) resolves against pdir.
                os.chdir(pdir)
                for ff in os.listdir(pdir):
                    if isexec(ff) and not isjunk(ff):
                        # each entry in the alias table must be (N,name),
                        # where N is the number of positional arguments of the
                        # alias.
                        src,tgt = os.path.splitext(ff)[0], os.path.abspath(ff)
                        created.append(src)
                        alias_table[src] = (0,tgt)
        else:
            for pdir in path:
                os.chdir(pdir)
                for ff in os.listdir(pdir):
                    if isexec(ff) and not isjunk(ff):
                        # Alias name is the filename minus the executable
                        # extension, lowercased (Windows is case-insensitive).
                        src, tgt = execre.sub(r'\1',ff), os.path.abspath(ff)
                        src = src.lower()
                        created.append(src)
                        alias_table[src] = (0,tgt)
        # Make sure the alias table doesn't contain keywords or builtins
        self.shell.alias_table_validate()
        # Call again init_auto_alias() so we get 'rm -i' and other
        # modified aliases since %rehashx will probably clobber them
        # self.shell.init_auto_alias()
    finally:
        # Always restore the caller's working directory.
        os.chdir(savedir)
    return created
ip.expose_magic("rehashdir",rehashdir_f)
mpl-2.0
patrioticcow/MessagesForSkype
packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/email/errors.py
468
1628
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]

"""email package exception classes."""


class MessageError(Exception):
    """Base class for errors in the email package."""


class MessageParseError(MessageError):
    """Base class for message parsing errors."""


class HeaderParseError(MessageParseError):
    """Error while parsing headers."""


class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""


class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""


class CharsetError(MessageError):
    """An illegal charset was given."""


# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
    """Base class for a message defect.

    Fix: the original declared ``class MessageDefect:`` -- a plain
    (old-style) class that is not an exception, so defects could never
    be raised or caught.  Deriving from ValueError matches the modern
    upstream ``email.errors`` module while staying backward-compatible:
    construction (``MessageDefect(line)``) and the ``line`` attribute
    are unchanged.
    """

    def __init__(self, line=None):
        # Forward the offending line to the exception machinery so
        # str()/args behave sensibly when a defect is raised.
        if line:
            super(MessageDefect, self).__init__(line)
        # Kept as an attribute for callers that inspect defects on
        # msg.defects instead of raising them.
        self.line = line


class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""


class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""


class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""


class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""


class MalformedHeaderDefect(MessageDefect):
    """Found a header that was missing a colon, or was otherwise malformed."""


class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""
mit
ticosax/django
tests/template_tests/templatetags/inclusion.py
174
8479
"""Template tags exercised by Django's ``inclusion_tag`` test suite.

Each tag renders ``inclusion.html`` (or a purpose-specific template)
and comes in two flavours: registered by template *name* (a string) or
by a pre-loaded ``Template`` object obtained from ``engine``.  Every
function also carries a docstring and an ``anything`` attribute so the
tests can check that ``@register.inclusion_tag`` preserves ``__doc__``
and ``__dict__`` of the wrapped function.
"""
import operator

from django.template import Engine, Library
from django.utils import six

# Engine with app template dirs enabled so 'inclusion.html' is found.
engine = Engine(app_dirs=True)

register = Library()


@register.inclusion_tag('inclusion.html')
def inclusion_no_params():
    """Expected inclusion_no_params __doc__"""
    return {"result": "inclusion_no_params - Expected result"}
inclusion_no_params.anything = "Expected inclusion_no_params __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_no_params_from_template():
    """Expected inclusion_no_params_from_template __doc__"""
    return {"result": "inclusion_no_params_from_template - Expected result"}
inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_one_param(arg):
    """Expected inclusion_one_param __doc__"""
    return {"result": "inclusion_one_param - Expected result: %s" % arg}
inclusion_one_param.anything = "Expected inclusion_one_param __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_param_from_template(arg):
    """Expected inclusion_one_param_from_template __doc__"""
    return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg}
inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__"


@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
    """Expected inclusion_explicit_no_context __doc__"""
    return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}
inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
    """Expected inclusion_explicit_no_context_from_template __doc__"""
    return {"result": "inclusion_explicit_no_context_from_template - Expected result: %s" % arg}
inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__"


@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_no_params_with_context(context):
    """Expected inclusion_no_params_with_context __doc__"""
    return {"result": "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
    """Expected inclusion_no_params_with_context_from_template __doc__"""
    return {"result": "inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context_from_template.anything = "Expected inclusion_no_params_with_context_from_template __dict__"


@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_params_and_context(context, arg):
    """Expected inclusion_params_and_context __doc__"""
    return {"result": "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
    """Expected inclusion_params_and_context_from_template __doc__"""
    return {"result": "inclusion_params_and_context_from_template - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_two_params(one, two):
    """Expected inclusion_two_params __doc__"""
    return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)}
inclusion_two_params.anything = "Expected inclusion_two_params __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_two_params_from_template(one, two):
    """Expected inclusion_two_params_from_template __doc__"""
    return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)}
inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_one_default(one, two='hi'):
    """Expected inclusion_one_default __doc__"""
    return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}
inclusion_one_default.anything = "Expected inclusion_one_default __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
    """Expected inclusion_one_default_from_template __doc__"""
    return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)}
inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args(one, two='hi', *args):
    """Expected inclusion_unlimited_args __doc__"""
    return {"result": "inclusion_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_unlimited_args_from_template(one, two='hi', *args):
    """Expected inclusion_unlimited_args_from_template __doc__"""
    return {"result": "inclusion_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_only_unlimited_args(*args):
    """Expected inclusion_only_unlimited_args __doc__"""
    return {"result": "inclusion_only_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__"


@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_only_unlimited_args_from_template(*args):
    """Expected inclusion_only_unlimited_args_from_template __doc__"""
    return {"result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__"


@register.inclusion_tag('test_incl_tag_current_app.html', takes_context=True)
def inclusion_tag_current_app(context):
    """Expected inclusion_tag_current_app __doc__"""
    # Empty context: the rendered template itself is what the test checks.
    return {}
inclusion_tag_current_app.anything = "Expected inclusion_tag_current_app __dict__"


@register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True)
def inclusion_tag_use_l10n(context):
    """Expected inclusion_tag_use_l10n __doc__"""
    return {}
inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"


@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
    """Expected inclusion_unlimited_args_kwargs __doc__"""
    # Sort the dictionary by key to guarantee the order for testing.
    sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
    return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % (
        ', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
        ', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
    )}
inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__"


# NOTE: deliberately inconsistent -- registered with takes_context=True
# but missing the leading ``context`` parameter; the test suite relies
# on this mismatch being reported as an error.  Do not "fix" it.
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_tag_without_context_parameter(arg):
    """Expected inclusion_tag_without_context_parameter __doc__"""
    return {}
inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__"


@register.inclusion_tag('inclusion_extends1.html')
def inclusion_extends1():
    # Template presumably uses {% extends %} -- content not visible here.
    return {}


@register.inclusion_tag('inclusion_extends2.html')
def inclusion_extends2():
    return {}
bsd-3-clause
twobob/buildroot-kindle
output/build/host-python-2.7.2/Lib/test/test_xrange.py
36
5209
# Python test set -- built-in functions
#
# Tests for the Python 2 ``xrange`` built-in.  This module is Python 2
# only: it uses ``xrange``, a long literal (``-1L``), ``sys.maxint``
# and ``itertools.izip_longest``.

import test.test_support, unittest
import sys
import pickle
import itertools

import warnings
# unittest's helpers trigger this DeprecationWarning when probing
# xrange with non-int arguments; silence it for the whole module.
warnings.filterwarnings("ignore",
                        "integer argument expected", DeprecationWarning,
                        "unittest")

# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    # Reference generator: yields exactly what xrange(start, stop, step)
    # should, using unbounded Python ints so huge limits work too.
    if (start - stop) // step < 0:
        # replace stop with next element in the sequence of integers
        # that are congruent to start modulo step.
        stop += (start - stop) % step
        while start != stop:
            yield start
            start += step

def pyrange_reversed(start, stop, step):
    # Reference for reversed(xrange(...)): walk the same sequence from
    # its last element back to its first.
    stop += (start - stop) % step
    return pyrange(stop - step, start - step, -step)


class XrangeTest(unittest.TestCase):

    def assert_iterators_equal(self, xs, ys, test_id, limit=None):
        # check that an iterator xs matches the expected results ys,
        # up to a given limit.
        if limit is not None:
            xs = itertools.islice(xs, limit)
            ys = itertools.islice(ys, limit)
        sentinel = object()
        # izip_longest + a sentinel distinguishes "ended early" from
        # "extra elements" from "wrong value".
        pairs = itertools.izip_longest(xs, ys, fillvalue=sentinel)
        for i, (x, y) in enumerate(pairs):
            if x == y:
                continue
            elif x == sentinel:
                self.fail('{}: iterator ended unexpectedly '
                          'at position {}; expected {}'.format(test_id, i, y))
            elif y == sentinel:
                self.fail('{}: unexpected excess element {} at '
                          'position {}'.format(test_id, x, i))
            else:
                self.fail('{}: wrong element at position {};'
                          'expected {}, got {}'.format(test_id, i, y, x))

    def test_xrange(self):
        # Basic sequences, including empty and negative-step ranges.
        self.assertEqual(list(xrange(3)), [0, 1, 2])
        self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
        self.assertEqual(list(xrange(0)), [])
        self.assertEqual(list(xrange(-3)), [])
        self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
        self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])

        a = 10
        b = 100
        c = 50

        self.assertEqual(list(xrange(a, a+2)), [a, a+1])
        self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
        self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])

        seq = list(xrange(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)

        seq = list(xrange(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)

        seq = list(xrange(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)

        # Bad argument counts/types and the zero-step case.
        self.assertRaises(TypeError, xrange)
        self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
        self.assertRaises(ValueError, xrange, 1, 2, 0)

        # xrange is limited to C-long arithmetic; huge values overflow.
        self.assertRaises(OverflowError, xrange, 10**100, 10**101, 10**101)

        self.assertRaises(TypeError, xrange, 0, "spam")
        self.assertRaises(TypeError, xrange, 0, 42, "spam")

        self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)

        self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
        self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)

        r = xrange(-sys.maxint, sys.maxint, 2)
        self.assertEqual(len(r), sys.maxint)
        self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)

    def test_pickling(self):
        # xrange objects must round-trip through every pickle protocol.
        testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                     (13, 21, 3), (-2, 2, 2)]
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for t in testcases:
                r = xrange(*t)
                self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
                                 list(r))

    def test_range_iterators(self):
        # see issue 7298
        # Stress values clustered around 32/64-bit boundaries, compared
        # against the pure-Python reference implementations above.
        limits = [base + jiggle
                  for M in (2**32, 2**64)
                  for base in (-M, -M//2, 0, M//2, M)
                  for jiggle in (-2, -1, 0, 1, 2)]
        test_ranges = [(start, end, step)
                       for start in limits
                       for end in limits
                       for step in (-2**63, -2**31, -2, -1, 1, 2)]

        for start, end, step in test_ranges:
            try:
                iter1 = xrange(start, end, step)
            except OverflowError:
                # Out-of-range arguments are allowed to overflow here.
                pass
            else:
                iter2 = pyrange(start, end, step)
                test_id = "xrange({}, {}, {})".format(start, end, step)
                # check first 100 entries
                self.assert_iterators_equal(iter1, iter2, test_id, limit=100)

            try:
                iter1 = reversed(xrange(start, end, step))
            except OverflowError:
                pass
            else:
                iter2 = pyrange_reversed(start, end, step)
                test_id = "reversed(xrange({}, {}, {}))".format(start, end, step)
                self.assert_iterators_equal(iter1, iter2, test_id, limit=100)


def test_main():
    test.test_support.run_unittest(XrangeTest)

if __name__ == "__main__":
    test_main()
gpl-2.0
pylover/network-interfaces
network_interfaces/iface.py
1
1727
# -*- coding: utf-8 -*- from .stanza import MultilineStanza from .errors import ValidationError __author__ = 'vahid' class IfaceBase(MultilineStanza): startup = None @property def name(self): return self._headers[1] @name.setter def name(self, val): self._headers[1] = val def __hash__(self): return hash(self.startup) ^ super(IfaceBase, self).__hash__() def __repr__(self): if self.startup: return '%s\n%s' % (self.startup, super(IfaceBase, self).__repr__()) return super(IfaceBase, self).__repr__() class Iface(IfaceBase): _type = 'iface' @property def address_family(self): return self._headers[2] @address_family.setter def address_family(self, val): self._headers[2] = val @property def method(self): return self._headers[3] @method.setter def method(self, val): self._headers[3] = val @property def address_netmask(self): return '%s/%s' % (self.address, self.netmask) def validate(self, allow_correction=False): # Returning true for now. # FIXME: implement validation # if not (hasattr(self, 'network')): # raise ValidationError() return True class Mapping(IfaceBase): _type = 'mapping' def __getattr__(self, item): if item.startswith('map_'): map_name = item.split('_')[1] key = map_name.replace('_', '-') return ' '.join([i for i in self._items if i[0] == 'map' and i[1] == key][0][2:]) return super(Mapping, self).__getattr__(item) @property def mappings(self): return [i for i in self._items if i[0] == 'map']
gpl-3.0
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/scipy/linalg/interpolative.py
41
31146
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** # Python module for interfacing with `id_dist`. r""" ====================================================================== Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) ====================================================================== .. moduleauthor:: Kenneth L. Ho <[email protected]> .. versionadded:: 0.13 .. 
currentmodule:: scipy.linalg.interpolative An interpolative decomposition (ID) of a matrix :math:`A \in \mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a factorization .. math:: A \Pi = \begin{bmatrix} A \Pi_{1} & A \Pi_{2} \end{bmatrix} = A \Pi_{1} \begin{bmatrix} I & T \end{bmatrix}, where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with :math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` are the *skeleton* and *interpolation matrices*, respectively. If :math:`A` does not have exact rank :math:`k`, then there exists an approximation in the form of an ID such that :math:`A = BP + E`, where :math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + 1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k + 1}` is the best possible error for a rank-:math:`k` approximation and, in fact, is achieved by the singular value decomposition (SVD) :math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k \times k}` is diagonal with nonnegative entries. The principal advantages of using an ID over an SVD are that: - it is cheaper to construct; - it preserves the structure of :math:`A`; and - it is more efficient to compute with in light of the identity submatrix of :math:`P`. Routines ======== Main functionality: .. autosummary:: :toctree: generated/ interp_decomp reconstruct_matrix_from_id reconstruct_interp_matrix reconstruct_skel_matrix id_to_svd svd estimate_spectral_norm estimate_spectral_norm_diff estimate_rank Support functions: .. 
autosummary:: :toctree: generated/ seed rand References ========== This module uses the ID software package [1]_ by Martinsson, Rokhlin, Shkolnisky, and Tygert, which is a Fortran library for computing IDs using various algorithms, including the rank-revealing QR approach of [2]_ and the more recent randomized methods described in [3]_, [4]_, and [5]_. This module exposes its functionality in a way convenient for Python users. Note that this module does not add any functionality beyond that of organizing a simpler and more consistent interface. We advise the user to consult also the `documentation for the ID package <https://cims.nyu.edu/~tygert/id_doc.pdf>`_. .. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a software package for low-rank approximation of matrices via interpolative decompositions, version 0.2." http://cims.nyu.edu/~tygert/id_doc.pdf. .. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404, 2005. `doi:10.1137/030602678 <http://dx.doi.org/10.1137/030602678>`_. .. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M. Tygert. "Randomized algorithms for the low-rank approximation of matrices." *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007. `doi:10.1073/pnas.0709640104 <http://dx.doi.org/10.1073/pnas.0709640104>`_. .. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30 (1): 47--68, 2011. `doi:10.1016/j.acha.2010.02.003 <http://dx.doi.org/10.1016/j.acha.2010.02.003>`_. .. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast randomized algorithm for the approximation of matrices." *Appl. Comput. Harmon. Anal.* 25 (3): 335--366, 2008. `doi:10.1016/j.acha.2007.12.002 <http://dx.doi.org/10.1016/j.acha.2007.12.002>`_. 
Tutorial ======== Initializing ------------ The first step is to import :mod:`scipy.linalg.interpolative` by issuing the command: >>> import scipy.linalg.interpolative as sli Now let's build a matrix. For this, we consider a Hilbert matrix, which is well know to have low rank: >>> from scipy.linalg import hilbert >>> n = 1000 >>> A = hilbert(n) We can also do this explicitly via: >>> import numpy as np >>> n = 1000 >>> A = np.empty((n, n), order='F') >>> for j in range(n): >>> for i in range(m): >>> A[i,j] = 1. / (i + j + 1) Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This instantiates the matrix in Fortran-contiguous order and is important for avoiding data copying when passing to the backend. We then define multiplication routines for the matrix by regarding it as a :class:`scipy.sparse.linalg.LinearOperator`: >>> from scipy.sparse.linalg import aslinearoperator >>> L = aslinearoperator(A) This automatically sets up methods describing the action of the matrix and its adjoint on a vector. Computing an ID --------------- We have several choices of algorithm to compute an ID. These fall largely according to two dichotomies: 1. how the matrix is represented, i.e., via its entries or via its action on a vector; and 2. whether to approximate it to a fixed relative precision or to a fixed rank. We step through each choice in turn below. In all cases, the ID is represented by three parameters: 1. a rank ``k``; 2. an index array ``idx``; and 3. interpolation coefficients ``proj``. The ID is specified by the relation ``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``. From matrix entries ................... We first consider a matrix given in terms of its entries. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(A, eps) where ``eps < 1`` is the desired precision. To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(A, k) where ``k >= 1`` is the desired rank. 
Both algorithms use random sampling and are usually faster than the corresponding older, deterministic algorithms, which can be accessed via the commands: >>> k, idx, proj = sli.interp_decomp(A, eps, rand=False) and: >>> idx, proj = sli.interp_decomp(A, k, rand=False) respectively. From matrix action .................. Now consider a matrix given in terms of its action on a vector as a :class:`scipy.sparse.linalg.LinearOperator`. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(L, eps) To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(L, k) These algorithms are randomized. Reconstructing an ID -------------------- The ID routines above do not output the skeleton and interpolation matrices explicitly but instead return the relevant information in a more compact (and sometimes more useful) form. To build these matrices, write: >>> B = sli.reconstruct_skel_matrix(A, k, idx) for the skeleton matrix and: >>> P = sli.reconstruct_interp_matrix(idx, proj) for the interpolation matrix. The ID approximation can then be computed as: >>> C = np.dot(B, P) This can also be constructed directly using: >>> C = sli.reconstruct_matrix_from_id(B, idx, proj) without having to first compute ``P``. Alternatively, this can be done explicitly as well using: >>> B = A[:,idx[:k]] >>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)] >>> C = np.dot(B, P) Computing an SVD ---------------- An ID can be converted to an SVD via the command: >>> U, S, V = sli.id_to_svd(B, idx, proj) The SVD approximation is then: >>> C = np.dot(U, np.dot(np.diag(S), np.dot(V.conj().T))) The SVD can also be computed "fresh" by combining both the ID and conversion steps into one command. Following the various ID algorithms above, there are correspondingly various SVD algorithms that one can employ. From matrix entries ................... We consider first SVD algorithms for a matrix given in terms of its entries. 
To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(A, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(A, k) Both algorithms use random sampling; for the determinstic versions, issue the keyword ``rand=False`` as above. From matrix action .................. Now consider a matrix given in terms of its action on a vector. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(L, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(L, k) Utility routines ---------------- Several utility routines are also available. To estimate the spectral norm of a matrix, use: >>> snorm = sli.estimate_spectral_norm(A) This algorithm is based on the randomized power method and thus requires only matrix-vector products. The number of iterations to take can be set using the keyword ``its`` (default: ``its=20``). The matrix is interpreted as a :class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it as a :class:`numpy.ndarray`, in which case it is trivially converted using :func:`scipy.sparse.linalg.aslinearoperator`. The same algorithm can also estimate the spectral norm of the difference of two matrices ``A1`` and ``A2`` as follows: >>> diff = sli.estimate_spectral_norm_diff(A1, A2) This is often useful for checking the accuracy of a matrix approximation. Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank of a matrix as well. This can be done with either: >>> k = sli.estimate_rank(A, eps) or: >>> k = sli.estimate_rank(L, eps) depending on the representation. The parameter ``eps`` controls the definition of the numerical rank. Finally, the random number generation required for all randomized routines can be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed values to their original values, use: >>> sli.seed('default') To specify the seed values, use: >>> sli.seed(s) where ``s`` must be an integer or array of 55 floats. 
If an integer, the array of floats is obtained by using `np.random.rand` with the given integer seed. To simply generate some random numbers, type: >>> sli.rand(n) where ``n`` is the number of random numbers to generate. Remarks ------- The above functions all automatically detect the appropriate interface and work with both real and complex data types, passing input arguments to the proper backend routine. """ import scipy.linalg._interpolative_backend as backend import numpy as np _DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") _TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") def _is_real(A): try: if A.dtype == np.complex128: return False elif A.dtype == np.float64: return True else: raise _DTYPE_ERROR except AttributeError: raise _TYPE_ERROR def seed(seed=None): """ Seed the internal random number generator used in this ID package. The generator is a lagged Fibonacci method with 55-element internal state. Parameters ---------- seed : int, sequence, 'default', optional If 'default', the random seed is reset to a default value. If `seed` is a sequence containing 55 floating-point numbers in range [0,1], these are used to set the internal state of the generator. If the value is an integer, the internal state is obtained from `numpy.random.RandomState` (MT19937) with the integer used as the initial seed. If `seed` is omitted (None), `numpy.random` is used to initialize the generator. """ # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`, # and :func:`backend.id_srando`. 
if isinstance(seed, str) and seed == 'default': backend.id_srando() elif hasattr(seed, '__len__'): state = np.asfortranarray(seed, dtype=float) if state.shape != (55,): raise ValueError("invalid input size") elif state.min() < 0 or state.max() > 1: raise ValueError("values not in range [0,1]") backend.id_srandi(state) elif seed is None: backend.id_srandi(np.random.rand(55)) else: rnd = np.random.RandomState(seed) backend.id_srandi(rnd.rand(55)) def rand(*shape): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. This routine is used for all random number generation in this package and can affect ID and SVD results. Parameters ---------- shape Shape of output array """ # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`. return backend.id_srand(np.prod(shape)).reshape(shape) def interp_decomp(A, eps_or_k, rand=True): """ Compute ID of a matrix. An ID of a matrix `A` is a factorization defined by a rank `k`, a column index array `idx`, and interpolation coefficients `proj` such that:: numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] The original matrix can then be reconstructed as:: numpy.hstack([A[:,idx[:k]], numpy.dot(A[:,idx[:k]], proj)] )[:,numpy.argsort(idx)] or via the routine :func:`reconstruct_matrix_from_id`. This can equivalently be written as:: numpy.dot(A[:,idx[:k]], numpy.hstack([numpy.eye(k), proj]) )[:,np.argsort(idx)] in terms of the skeleton and interpolation matrices:: B = A[:,idx[:k]] and:: P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] respectively. See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. The ID can be computed to any relative precision or rank (depending on the value of `eps_or_k`). 
If a precision is specified (`eps_or_k < 1`), then this function has the output signature:: k, idx, proj = interp_decomp(A, eps_or_k) Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output signature is:: idx, proj = interp_decomp(A, eps_or_k) .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.iddp_id`, :func:`backend.iddp_aid`, :func:`backend.iddp_rid`, :func:`backend.iddr_id`, :func:`backend.iddr_aid`, :func:`backend.iddr_rid`, :func:`backend.idzp_id`, :func:`backend.idzp_aid`, :func:`backend.idzp_rid`, :func:`backend.idzr_id`, :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` Matrix to be factored eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- k : int Rank required to achieve specified relative precision if `eps_or_k < 1`. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. 
""" from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: k, idx, proj = backend.iddp_aid(eps, A) else: k, idx, proj = backend.idzp_aid(eps, A) else: if real: k, idx, proj = backend.iddp_id(eps, A) else: k, idx, proj = backend.idzp_id(eps, A) return k, idx - 1, proj else: k = int(eps_or_k) if rand: if real: idx, proj = backend.iddr_aid(A, k) else: idx, proj = backend.idzr_aid(A, k) else: if real: idx, proj = backend.iddr_id(A, k) else: idx, proj = backend.idzr_id(A, k) return idx - 1, proj elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if eps_or_k < 1: eps = eps_or_k if real: k, idx, proj = backend.iddp_rid(eps, m, n, matveca) else: k, idx, proj = backend.idzp_rid(eps, m, n, matveca) return k, idx - 1, proj else: k = int(eps_or_k) if real: idx, proj = backend.iddr_rid(m, n, matveca, k) else: idx, proj = backend.idzr_rid(m, n, matveca, k) return idx - 1, proj else: raise _TYPE_ERROR def reconstruct_matrix_from_id(B, idx, proj): """ Reconstruct matrix from its ID. A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconid` and :func:`backend.idz_reconid`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Reconstructed matrix. """ if _is_real(B): return backend.idd_reconid(B, idx + 1, proj) else: return backend.idz_reconid(B, idx + 1, proj) def reconstruct_interp_matrix(idx, proj): """ Reconstruct interpolation matrix from ID. 
The interpolation matrix can be reconstructed from the ID indices and coefficients `idx` and `proj`, respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `B` via:: numpy.dot(B, P) See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconint` and :func:`backend.idz_reconint`. Parameters ---------- idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Interpolation matrix. """ if _is_real(proj): return backend.idd_reconint(idx + 1, proj) else: return backend.idz_reconint(idx + 1, proj) def reconstruct_skel_matrix(A, k, idx): """ Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix `A` and its ID rank and indices `k` and `idx`, respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_interp_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_copycols` and :func:`backend.idz_copycols`. Parameters ---------- A : :class:`numpy.ndarray` Original matrix. k : int Rank of ID. idx : :class:`numpy.ndarray` Column index array. Returns ------- :class:`numpy.ndarray` Skeleton matrix. """ if _is_real(A): return backend.idd_copycols(A, k, idx + 1) else: return backend.idz_copycols(A, k, idx + 1) def id_to_svd(B, idx, proj): """ Convert ID to SVD. 
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:`svd`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_id2svd` and :func:`backend.idz_id2svd`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ if _is_real(B): U, V, S = backend.idd_id2svd(B, idx + 1, proj) else: U, V, S = backend.idz_id2svd(B, idx + 1, proj) return U, S, V def estimate_spectral_norm(A, its=20): """ Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_snorm` and :func:`backend.idz_snorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) m, n = A.shape matvec = lambda x: A. matvec(x) matveca = lambda x: A.rmatvec(x) if _is_real(A): return backend.idd_snorm(m, n, matveca, matvec, its=its) else: return backend.idz_snorm(m, n, matveca, matvec, its=its) def estimate_spectral_norm_diff(A, B, its=20): """ Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. 
For details, see :func:`backend.idd_diffsnorm` and :func:`backend.idz_diffsnorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). B : :class:`scipy.sparse.linalg.LinearOperator` Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate of matrix difference. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) B = aslinearoperator(B) m, n = A.shape matvec1 = lambda x: A. matvec(x) matveca1 = lambda x: A.rmatvec(x) matvec2 = lambda x: B. matvec(x) matveca2 = lambda x: B.rmatvec(x) if _is_real(A): return backend.idd_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) else: return backend.idz_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) def svd(A, eps_or_k, rand=True): """ Compute SVD of a matrix via an ID. An SVD of a matrix `A` is a factorization:: A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) where `U` and `V` have orthonormal columns and `S` is nonnegative. The SVD can be computed to any relative precision or rank (depending on the value of `eps_or_k`). See also :func:`interp_decomp` and :func:`id_to_svd`. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`, :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`, :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`, :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`, :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`, :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`. 
Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix to be factored, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: U, V, S = backend.iddp_asvd(eps, A) else: U, V, S = backend.idzp_asvd(eps, A) else: if real: U, V, S = backend.iddp_svd(eps, A) else: U, V, S = backend.idzp_svd(eps, A) else: k = int(eps_or_k) if k > min(A.shape): raise ValueError("Approximation rank %s exceeds min(A.shape) = " " %s " % (k, min(A.shape))) if rand: if real: U, V, S = backend.iddr_asvd(A, k) else: U, V, S = backend.idzr_asvd(A, k) else: if real: U, V, S = backend.iddr_svd(A, k) else: U, V, S = backend.idzr_svd(A, k) elif isinstance(A, LinearOperator): m, n = A.shape matvec = lambda x: A.matvec(x) matveca = lambda x: A.rmatvec(x) if eps_or_k < 1: eps = eps_or_k if real: U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec) else: U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec) else: k = int(eps_or_k) if real: U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k) else: U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k) else: raise _TYPE_ERROR return U, S, V def estimate_rank(A, eps): """ Estimate matrix rank to a specified relative precision using randomized methods. 
The matrix `A` can be given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used for each case. If `A` is of type :class:`numpy.ndarray`, then the output rank is typically about 8 higher than the actual numerical rank. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`, :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix whose rank is to be estimated, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `rmatvec` method (to apply the matrix adjoint). eps : float Relative error for numerical rank definition. Returns ------- int Estimated matrix rank. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if real: rank = backend.idd_estrank(eps, A) else: rank = backend.idz_estrank(eps, A) if rank == 0: # special return value for nearly full rank rank = min(A.shape) return rank elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if real: return backend.idd_findrank(eps, m, n, matveca) else: return backend.idz_findrank(eps, m, n, matveca) else: raise _TYPE_ERROR
mit
keflavich/scikit-image
skimage/feature/tests/test_censure.py
30
3606
import numpy as np from numpy.testing import assert_array_equal, assert_raises from skimage.data import moon from skimage.feature import CENSURE from skimage._shared.testing import test_parallel from skimage.transform import rescale img = moon() np.random.seed(0) def test_censure_on_rectangular_images(): """Censure feature detector should work on 2D image of any shape.""" rect_image = np.random.rand(300, 200) square_image = np.random.rand(200, 200) CENSURE().detect((square_image)) CENSURE().detect((rect_image)) def test_keypoints_censure_color_image_unsupported_error(): """Censure keypoints can be extracted from gray-scale images only.""" assert_raises(ValueError, CENSURE().detect, np.zeros((20, 20, 3))) def test_keypoints_censure_mode_validity_error(): """Mode argument in keypoints_censure can be either DoB, Octagon or STAR.""" assert_raises(ValueError, CENSURE, mode='dummy') def test_keypoints_censure_scale_range_error(): """Difference between the the max_scale and min_scale parameters in keypoints_censure should be greater than or equal to two.""" assert_raises(ValueError, CENSURE, min_scale=1, max_scale=2) def test_keypoints_censure_moon_image_dob(): """Verify the actual Censure keypoints and their corresponding scale with the expected values for DoB filter.""" detector = CENSURE() detector.detect(img) expected_keypoints = np.array([[ 21, 497], [ 36, 46], [119, 350], [185, 177], [287, 250], [357, 239], [463, 116], [464, 132], [467, 260]]) expected_scales = np.array([3, 4, 4, 2, 2, 3, 2, 2, 2]) assert_array_equal(expected_keypoints, detector.keypoints) assert_array_equal(expected_scales, detector.scales) @test_parallel() def test_keypoints_censure_moon_image_octagon(): """Verify the actual Censure keypoints and their corresponding scale with the expected values for Octagon filter.""" detector = CENSURE(mode='octagon') detector.detect(rescale(img, 0.25)) # quarter scale image for speed expected_keypoints = np.array([[ 23, 27], [ 29, 89], [ 31, 87], [106, 59], 
[111, 67]]) expected_scales = np.array([3, 2, 5, 2, 4]) assert_array_equal(expected_keypoints, detector.keypoints) assert_array_equal(expected_scales, detector.scales) def test_keypoints_censure_moon_image_star(): """Verify the actual Censure keypoints and their corresponding scale with the expected values for STAR filter.""" detector = CENSURE(mode='star') detector.detect(rescale(img, 0.25)) # quarter scale image for speed expected_keypoints = np.array([[ 23, 27], [ 29, 89], [ 30, 86], [107, 59], [109, 64], [111, 67], [113, 70]]) expected_scales = np.array([3, 2, 4, 2, 5, 3, 2]) assert_array_equal(expected_keypoints, detector.keypoints) assert_array_equal(expected_scales, detector.scales) if __name__ == '__main__': from numpy import testing testing.run_module_suite()
bsd-3-clause
sebady/selenium
py/test/selenium/webdriver/browser_specific_template.py
19
1503
#!/usr/bin/python # # Copyright 2011 Software Freedom Conservancy # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ##CUSTOM_TEST_IMPORT## from selenium import webdriver from selenium.##PACKAGE_NAME## import ##GENERAL_FILENAME## from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer from selenium.test.selenium.webdriver.common.network import get_lan_ip def setup_module(module): ##CUSTOM_TEST_SETUP## webserver = SimpleWebServer(host=get_lan_ip()) webserver.start() ##BROWSER_SPECIFIC_TEST_CLASS##.webserver = webserver ##BROWSER_SPECIFIC_TEST_CLASS##.driver = webdriver.##BROWSER_CONSTRUCTOR## class ##BROWSER_SPECIFIC_TEST_CLASS##(##GENERAL_FILENAME##.##GENERAL_TEST_CLASS##): pass def teardown_module(module): try: ##BROWSER_SPECIFIC_TEST_CLASS##.driver.quit() except AttributeError: pass try: ##BROWSER_SPECIFIC_TEST_CLASS##.webserver.stop() except AttributeError: pass ##CUSTOM_TEST_TEARDOWN##
apache-2.0
CSC301H-Fall2013/JuakStore
site-packages/django/template/smartif.py
239
6269
""" Parser and utilities for the smart 'if' tag """ # Using a simple top down parser, as described here: # http://effbot.org/zone/simple-top-down-parsing.htm. # 'led' = left denotation # 'nud' = null denotation # 'bp' = binding power (left = lbp, right = rbp) class TokenBase(object): """ Base class for operators and literals, mainly for debugging and for throwing syntax errors. """ id = None # node/token type name value = None # used by literals first = second = None # used by tree nodes def nud(self, parser): # Null denotation - called in prefix context raise parser.error_class( "Not expecting '%s' in this position in if tag." % self.id ) def led(self, left, parser): # Left denotation - called in infix context raise parser.error_class( "Not expecting '%s' as infix operator in if tag." % self.id ) def display(self): """ Returns what to display in error messages for this node """ return self.id def __repr__(self): out = [str(x) for x in [self.id, self.first, self.second] if x is not None] return "(" + " ".join(out) + ")" def infix(bp, func): """ Creates an infix operator, given a binding power and a function that evaluates the node """ class Operator(TokenBase): lbp = bp def led(self, left, parser): self.first = left self.second = parser.expression(bp) return self def eval(self, context): try: return func(context, self.first, self.second) except Exception: # Templates shouldn't throw exceptions when rendering. We are # most likely to get exceptions for things like {% if foo in bar # %} where 'bar' does not support 'in', so default to False return False return Operator def prefix(bp, func): """ Creates a prefix operator, given a binding power and a function that evaluates the node. """ class Operator(TokenBase): lbp = bp def nud(self, parser): self.first = parser.expression(bp) self.second = None return self def eval(self, context): try: return func(context, self.first) except Exception: return False return Operator # Operator precedence follows Python. 
# NB - we can get slightly more accurate syntax error messages by not using the # same object for '==' and '='. # We defer variable evaluation to the lambda to ensure that terms are # lazily evaluated using Python's boolean parsing logic. OPERATORS = { 'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)), 'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)), 'not': prefix(8, lambda context, x: not x.eval(context)), 'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)), 'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)), '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)), '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)), '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)), '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)), '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)), } # Assign 'id' to each: for key, op in OPERATORS.items(): op.id = key class Literal(TokenBase): """ A basic self-resolvable object similar to a Django template variable. """ # IfParser uses Literal in create_var, but TemplateIfParser overrides # create_var so that a proper implementation that actually resolves # variables, filters etc is used. 
id = "literal" lbp = 0 def __init__(self, value): self.value = value def display(self): return repr(self.value) def nud(self, parser): return self def eval(self, context): return self.value def __repr__(self): return "(%s %r)" % (self.id, self.value) class EndToken(TokenBase): lbp = 0 def nud(self, parser): raise parser.error_class("Unexpected end of expression in if tag.") EndToken = EndToken() class IfParser(object): error_class = ValueError def __init__(self, tokens): # pre-pass necessary to turn 'not','in' into single token l = len(tokens) mapped_tokens = [] i = 0 while i < l: token = tokens[i] if token == "not" and i + 1 < l and tokens[i+1] == "in": token = "not in" i += 1 # skip 'in' mapped_tokens.append(self.translate_token(token)) i += 1 self.tokens = mapped_tokens self.pos = 0 self.current_token = self.next_token() def translate_token(self, token): try: op = OPERATORS[token] except (KeyError, TypeError): return self.create_var(token) else: return op() def next_token(self): if self.pos >= len(self.tokens): return EndToken else: retval = self.tokens[self.pos] self.pos += 1 return retval def parse(self): retval = self.expression() # Check that we have exhausted all the tokens if self.current_token is not EndToken: raise self.error_class("Unused '%s' at end of if expression." % self.current_token.display()) return retval def expression(self, rbp=0): t = self.current_token self.current_token = self.next_token() left = t.nud(self) while rbp < self.current_token.lbp: t = self.current_token self.current_token = self.next_token() left = t.led(left, self) return left def create_var(self, value): return Literal(value)
mit
yanheven/neutron
neutron/agent/l3/item_allocator.py
25
4083
# Copyright 2015 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os class ItemAllocator(object): """Manages allocation of items from a pool Some of the allocations such as link local addresses used for routing inside the fip namespaces need to persist across agent restarts to maintain consistency. Persisting such allocations in the neutron database is unnecessary and would degrade performance. ItemAllocator utilizes local file system to track allocations made for objects of a given class. The persistent datastore is a file. The records are one per line of the format: key<delimiter>value. For example if the delimiter is a ',' (the default value) then the records will be: key,value (one per line) """ def __init__(self, state_file, ItemClass, item_pool, delimiter=','): """Read the file with previous allocations recorded. See the note in the allocate method for more detail. """ self.ItemClass = ItemClass self.state_file = state_file self.allocations = {} self.remembered = {} self.pool = item_pool for line in self._read(): key, saved_value = line.strip().split(delimiter) self.remembered[key] = self.ItemClass(saved_value) self.pool.difference_update(self.remembered.values()) def allocate(self, key): """Try to allocate an item of ItemClass type. I expect this to work in all cases because I expect the pool size to be large enough for any situation. Nonetheless, there is some defensive programming in here. 
Since the allocations are persisted, there is the chance to leak allocations which should have been released but were not. This leak could eventually exhaust the pool. So, if a new allocation is needed, the code first checks to see if there are any remembered allocations for the key. If not, it checks the free pool. If the free pool is empty then it dumps the remembered allocations to free the pool. This final desperate step will not happen often in practice. """ if key in self.remembered: self.allocations[key] = self.remembered.pop(key) return self.allocations[key] if not self.pool: # Desperate times. Try to get more in the pool. self.pool.update(self.remembered.values()) self.remembered.clear() if not self.pool: # More than 256 routers on a compute node! raise RuntimeError("Cannot allocate item of type:" " %s from pool using file %s" % (self.ItemClass, self.state_file)) self.allocations[key] = self.pool.pop() self._write_allocations() return self.allocations[key] def release(self, key): self.pool.add(self.allocations.pop(key)) self._write_allocations() def _write_allocations(self): current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] current.extend(remembered) self._write(current) def _write(self, lines): with open(self.state_file, "w") as f: f.writelines(lines) def _read(self): if not os.path.exists(self.state_file): return [] with open(self.state_file) as f: return f.readlines()
apache-2.0
USGSDenverPychron/pychron
pychron/experiment/utilities/comment_template.py
1
1619
# =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from traits.api import List, Dict from pychron.core.templater.base_templater import BaseTemplater class CommentTemplater(BaseTemplater): attributes = List(['irrad_level', 'irrad_hole', '<SPACE>']) example_context = Dict({'irrad_level': 'A', 'irrad_hole': '9'}) base_predefined_labels = List(['', 'irrad_level : irrad_hole']) label = 'irrad_level : irrad_hole' persistence_name = 'comment' def render(self, obj): f = self.formatter return f.format(**self._generate_context(obj)) def _generate_context(self, obj): ctx = {} for ai in self.attributes: v = ' ' if ai == '<SPACE>' else getattr(obj, ai) ctx[ai] = str(v) return ctx # ============= EOF =============================================
apache-2.0
kingvuplus/EGAMI-B
lib/python/Plugins/SystemPlugins/DiseqcTester/plugin.py
63
27159
import random from Screens.Satconfig import NimSelection from Screens.Screen import Screen from Screens.TextBox import TextBox from Screens.MessageBox import MessageBox from Plugins.Plugin import PluginDescriptor from Components.ActionMap import ActionMap, NumberActionMap from Components.NimManager import nimmanager from Components.ResourceManager import resourcemanager from Components.TuneTest import TuneTest from Components.Sources.List import List from Components.Sources.Progress import Progress from Components.Sources.StaticText import StaticText from Components.ConfigList import ConfigListScreen from Components.config import getConfigListEntry, ConfigSelection, ConfigYesNo from Components.Harddisk import harddiskmanager # always use: # setResultType(type) # setResultParameter(parameter) # getTextualResult() class ResultParser: def __init__(self): pass TYPE_BYORBPOS = 0 TYPE_BYINDEX = 1 TYPE_ALL = 2 def setResultType(self, type): self.type = type def setResultParameter(self, parameter): if self.type == self.TYPE_BYORBPOS: self.orbpos = parameter elif self.type == self.TYPE_BYINDEX: self.index = parameter def getTextualResultForIndex(self, index, logfulltransponders = False): text = "" text += "%s:\n" % self.getTextualIndexRepresentation(index) failed, successful = self.results[index]["failed"], self.results[index]["successful"] countfailed = len(failed) countsuccessful = len(successful) countall = countfailed + countsuccessful percentfailed = round(countfailed / float(countall + 0.0001) * 100) percentsuccessful = round(countsuccessful / float(countall + 0.0001) * 100) text += "Tested %d transponders\n%d (%d %%) transponders succeeded\n%d (%d %%) transponders failed\n" % (countall, countsuccessful, percentsuccessful, countfailed, percentfailed) reasons = {} completelist = [] if countfailed > 0: for transponder in failed: completelist.append({"transponder": transponder[0], "fedata": transponder[-1]}) reasons[transponder[2]] = reasons.get(transponder[2], []) 
reasons[transponder[2]].append(transponder) if transponder[2] == "pids_failed": print transponder[2], "-", transponder[3] text += "The %d unsuccessful tuning attempts failed for the following reasons:\n" % countfailed for reason in reasons.keys(): text += "%s: %d transponders failed\n" % (reason, len(reasons[reason])) for reason in reasons.keys(): text += "\n" text += "%s previous planes:\n" % reason for transponder in reasons[reason]: if transponder[1] is not None: text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1])) else: text += "No transponder tuned" text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0])) text += "\n" if logfulltransponders: text += str(transponder[1]) text += " ==> " text += str(transponder[0]) text += "\n" if reason == "pids_failed": text += "(tsid, onid): " text += str(transponder[3]['real']) text += "(read from sat) != " text += str(transponder[3]['expected']) text += "(read from file)" text += "\n" text += "\n" if countsuccessful > 0: text += "\n" text += "Successfully tuned transponders' previous planes:\n" for transponder in successful: completelist.append({"transponder": transponder[0], "fedata": transponder[-1]}) if transponder[1] is not None: text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1])) else: text += "No transponder tuned" text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0])) text += "\n" text += "------------------------------------------------\n" text += "complete transponderlist:\n" for entry in completelist: text += str(entry["transponder"]) + " -- " + str(entry["fedata"]) + "\n" return text def getTextualResult(self): text = "" if self.type == self.TYPE_BYINDEX: text += self.getTextualResultForIndex(self.index) elif self.type == self.TYPE_BYORBPOS: for index in self.results.keys(): if index[2] == self.orbpos: text += self.getTextualResultForIndex(index) text += 
"\n-----------------------------------------------------\n" elif self.type == self.TYPE_ALL: orderedResults = {} for index in self.results.keys(): orbpos = index[2] orderedResults[orbpos] = orderedResults.get(orbpos, []) orderedResults[orbpos].append(index) ordered_orbpos = orderedResults.keys() ordered_orbpos.sort() for orbpos in ordered_orbpos: text += "\n*****************************************\n" text += "Orbital position %s:" % str(orbpos) text += "\n*****************************************\n" for index in orderedResults[orbpos]: text += self.getTextualResultForIndex(index, logfulltransponders = True) text += "\n-----------------------------------------------------\n" return text class DiseqcTester(Screen, TuneTest, ResultParser): skin = """ <screen position="90,100" size="520,400" title="DiSEqC Tester" > <!--ePixmap pixmap="icons/dish_scan.png" position="5,25" zPosition="0" size="119,110" transparent="1" alphatest="on" /> <widget source="Frontend" render="Label" position="190,10" zPosition="2" size="260,20" font="Regular;19" halign="center" valign="center" transparent="1"> <convert type="FrontendInfo">SNRdB</convert> </widget> <eLabel name="snr" text="SNR:" position="120,35" size="60,22" font="Regular;21" halign="right" transparent="1" /> <widget source="Frontend" render="Progress" position="190,35" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc"> <convert type="FrontendInfo">SNR</convert> </widget> <widget source="Frontend" render="Label" position="460,35" size="60,22" font="Regular;21"> <convert type="FrontendInfo">SNR</convert> </widget> <eLabel name="agc" text="AGC:" position="120,60" size="60,22" font="Regular;21" halign="right" transparent="1" /> <widget source="Frontend" render="Progress" position="190,60" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc"> <convert type="FrontendInfo">AGC</convert> </widget> <widget source="Frontend" render="Label" position="460,60" size="60,22" font="Regular;21"> 
<convert type="FrontendInfo">AGC</convert> </widget> <eLabel name="ber" text="BER:" position="120,85" size="60,22" font="Regular;21" halign="right" transparent="1" /> <widget source="Frontend" render="Progress" position="190,85" size="260,20" pixmap="bar_ber.png" borderWidth="2" borderColor="#cccccc"> <convert type="FrontendInfo">BER</convert> </widget> <widget source="Frontend" render="Label" position="460,85" size="60,22" font="Regular;21"> <convert type="FrontendInfo">BER</convert> </widget> <eLabel name="lock" text="Lock:" position="120,115" size="60,22" font="Regular;21" halign="right" /> <widget source="Frontend" render="Pixmap" pixmap="icons/lock_on.png" position="190,110" zPosition="1" size="38,31" alphatest="on"> <convert type="FrontendInfo">LOCK</convert> <convert type="ConditionalShowHide" /> </widget> <widget source="Frontend" render="Pixmap" pixmap="icons/lock_off.png" position="190,110" zPosition="1" size="38,31" alphatest="on"> <convert type="FrontendInfo">LOCK</convert> <convert type="ConditionalShowHide">Invert</convert> </widget--> <widget source="progress_list" render="Listbox" position="0,0" size="510,150" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (10, 0), size = (330, 25), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the index name, MultiContentEntryText(pos = (330, 0), size = (150, 25), flags = RT_HALIGN_RIGHT, text = 2) # index 2 is the status, ], "fonts": [gFont("Regular", 20)], "itemHeight": 25 } </convert> </widget> <eLabel name="overall_progress" text="Overall progress:" position="20,162" size="480,22" font="Regular;21" halign="center" transparent="1" /> <widget source="overall_progress" render="Progress" position="20,192" size="480,20" borderWidth="2" backgroundColor="#254f7497" /> <eLabel name="overall_progress" text="Progress:" position="20,222" size="480,22" font="Regular;21" halign="center" transparent="1" /> <widget source="sub_progress" render="Progress" 
position="20,252" size="480,20" borderWidth="2" backgroundColor="#254f7497" /> <eLabel name="" text="Failed:" position="20,282" size="140,22" font="Regular;21" halign="left" transparent="1" /> <widget source="failed_counter" render="Label" position="160,282" size="100,20" font="Regular;21" /> <eLabel name="" text="Succeeded:" position="20,312" size="140,22" font="Regular;21" halign="left" transparent="1" /> <widget source="succeeded_counter" render="Label" position="160,312" size="100,20" font="Regular;21" /> <eLabel name="" text="With errors:" position="20,342" size="140,22" font="Regular;21" halign="left" transparent="1" /> <widget source="witherrors_counter" render="Label" position="160,342" size="100,20" font="Regular;21" /> <eLabel name="" text="Not tested:" position="20,372" size="140,22" font="Regular;21" halign="left" transparent="1" /> <widget source="untestable_counter" render="Label" position="160,372" size="100,20" font="Regular;21" /> <widget source="CmdText" render="Label" position="300,282" size="180,200" font="Regular;21" /> </screen>""" TEST_TYPE_QUICK = 0 TEST_TYPE_RANDOM = 1 TEST_TYPE_COMPLETE = 2 def __init__(self, session, feid, test_type = TEST_TYPE_QUICK, loopsfailed = 3, loopssuccessful = 1, log = False): Screen.__init__(self, session) self.feid = feid self.test_type = test_type self.loopsfailed = loopsfailed self.loopssuccessful = loopssuccessful self.log = log self["actions"] = NumberActionMap(["SetupActions"], { "ok": self.select, "cancel": self.keyCancel, }, -2) TuneTest.__init__(self, feid, stopOnSuccess = self.loopssuccessful, stopOnError = self.loopsfailed) #self["Frontend"] = FrontendStatus(frontend_source = lambda : self.frontend, update_interval = 100) self["overall_progress"] = Progress() self["sub_progress"] = Progress() self["failed_counter"] = StaticText("0") self["succeeded_counter"] = StaticText("0") self["witherrors_counter"] = StaticText("0") self["untestable_counter"] = StaticText("0") self.list = [] self["progress_list"] 
= List(self.list) self["progress_list"].onSelectionChanged.append(self.selectionChanged) self["CmdText"] = StaticText(_("Please wait while scanning is in progress...")) self.indexlist = {} self.readTransponderList() self.running = False self.results = {} self.resultsstatus = {} self.onLayoutFinish.append(self.go) def getProgressListComponent(self, index, status): return index, self.getTextualIndexRepresentation(index), status def clearProgressList(self): self.list = [] self["progress_list"].list = self.list def addProgressListItem(self, index): if index in self.indexlist: for entry in self.list: if entry[0] == index: self.changeProgressListStatus(index, "working") return self.list.append(self.getProgressListComponent(index, _("working"))) self["progress_list"].list = self.list self["progress_list"].setIndex(len(self.list) - 1) def changeProgressListStatus(self, index, status): self.newlist = [] count = 0 indexpos = 0 for entry in self.list: if entry[0] == index: self.newlist.append(self.getProgressListComponent(index, status)) indexpos = count else: self.newlist.append(entry) count += 1 self.list = self.newlist self["progress_list"].list = self.list self["progress_list"].setIndex(indexpos) def readTransponderList(self): for sat in nimmanager.getSatListForNim(self.feid): for transponder in nimmanager.getTransponders(sat[0]): #print transponder mytransponder = (transponder[1] / 1000, transponder[2] / 1000, transponder[3], transponder[4], transponder[7], sat[0], transponder[5], transponder[6], transponder[8], transponder[9], transponder[10], transponder[11]) self.analyseTransponder(mytransponder) def getIndexForTransponder(self, transponder): if transponder[0] < 11700: band = 1 # low else: band = 0 # high polarisation = transponder[2] sat = transponder[5] index = (band, polarisation, sat) return index # sort the transponder into self.transponderlist def analyseTransponder(self, transponder): index = self.getIndexForTransponder(transponder) if index not in 
self.indexlist: self.indexlist[index] = [] self.indexlist[index].append(transponder) #print "self.indexlist:", self.indexlist # returns a string for the user representing a human readable output for index def getTextualIndexRepresentation(self, index): print "getTextualIndexRepresentation:", index text = "" text += nimmanager.getSatDescription(index[2]) + ", " if index[0] == 1: text += "Low Band, " else: text += "High Band, " if index[1] == 0: text += "H" else: text += "V" return text def fillTransponderList(self): self.clearTransponder() print "----------- fillTransponderList" print "index:", self.currentlyTestedIndex keys = self.indexlist.keys() if self.getContinueScanning(): print "index:", self.getTextualIndexRepresentation(self.currentlyTestedIndex) for transponder in self.indexlist[self.currentlyTestedIndex]: self.addTransponder(transponder) print "transponderList:", self.transponderlist return True else: return False def progressCallback(self, progress): if progress[0] != self["sub_progress"].getRange(): self["sub_progress"].setRange(progress[0]) self["sub_progress"].setValue(progress[1]) # logic for scanning order of transponders # on go getFirstIndex is called def getFirstIndex(self): # TODO use other function to scan more randomly if self.test_type == self.TEST_TYPE_QUICK: self.myindex = 0 keys = self.indexlist.keys() keys.sort(key = lambda a: a[2]) # sort by orbpos self["overall_progress"].setRange(len(keys)) self["overall_progress"].setValue(self.myindex) return keys[0] elif self.test_type == self.TEST_TYPE_RANDOM: self.randomkeys = self.indexlist.keys() random.shuffle(self.randomkeys) self.myindex = 0 self["overall_progress"].setRange(len(self.randomkeys)) self["overall_progress"].setValue(self.myindex) return self.randomkeys[0] elif self.test_type == self.TEST_TYPE_COMPLETE: keys = self.indexlist.keys() print "keys:", keys successorindex = {} for index in keys: successorindex[index] = [] for otherindex in keys: if otherindex != index: 
successorindex[index].append(otherindex) random.shuffle(successorindex[index]) self.keylist = [] stop = False currindex = None while not stop: if currindex is None or len(successorindex[currindex]) == 0: oldindex = currindex for index in successorindex.keys(): if len(successorindex[index]) > 0: currindex = index self.keylist.append(currindex) break if currindex == oldindex: stop = True else: currindex = successorindex[currindex].pop() self.keylist.append(currindex) print "self.keylist:", self.keylist self.myindex = 0 self["overall_progress"].setRange(len(self.keylist)) self["overall_progress"].setValue(self.myindex) return self.keylist[0] # after each index is finished, getNextIndex is called to get the next index to scan def getNextIndex(self): # TODO use other function to scan more randomly if self.test_type == self.TEST_TYPE_QUICK: self.myindex += 1 keys = self.indexlist.keys() keys.sort(key = lambda a: a[2]) # sort by orbpos self["overall_progress"].setValue(self.myindex) if self.myindex < len(keys): return keys[self.myindex] else: return None elif self.test_type == self.TEST_TYPE_RANDOM: self.myindex += 1 keys = self.randomkeys self["overall_progress"].setValue(self.myindex) if self.myindex < len(keys): return keys[self.myindex] else: return None elif self.test_type == self.TEST_TYPE_COMPLETE: self.myindex += 1 keys = self.keylist self["overall_progress"].setValue(self.myindex) if self.myindex < len(keys): return keys[self.myindex] else: return None # after each index is finished and the next index is returned by getNextIndex # the algorithm checks, if we should continue scanning def getContinueScanning(self): if self.test_type == self.TEST_TYPE_QUICK or self.test_type == self.TEST_TYPE_RANDOM: return self.myindex < len(self.indexlist.keys()) elif self.test_type == self.TEST_TYPE_COMPLETE: return self.myindex < len(self.keylist) def addResult(self, index, status, failedTune, successfullyTune): self.results[index] = self.results.get(index, {"failed": [], 
"successful": [], "status": None, "internalstatus": None}) self.resultsstatus[status] = self.resultsstatus.get(status, []) oldstatus = self.results[index]["internalstatus"] if oldstatus is None: self.results[index]["status"] = status elif oldstatus == "successful": if status == "failed": self.results[index]["status"] = "with_errors" elif status == "successful": self.results[index]["status"] = oldstatus elif status == "with_errors": self.results[index]["status"] = "with_errors" elif status == "not_tested": self.results[index]["status"] = oldstatus elif oldstatus == "failed": if status == "failed": self.results[index]["status"] = oldstatus elif status == "successful": self.results[index]["status"] = "with_errors" elif status == "with_errors": self.results[index]["status"] = "with_errors" elif status == "not_tested": self.results[index]["status"] = oldstatus elif oldstatus == "with_errors": if status == "failed": self.results[index]["status"] = oldstatus elif status == "successful": self.results[index]["status"] = oldstatus elif status == "with_errors": self.results[index]["status"] = oldstatus elif status == "not_tested": self.results[index]["status"] = oldstatus elif oldstatus == "not_tested": self.results[index]["status"] = status if self.results[index]["status"] != "working": self.results[index]["internalstatus"] = self.results[index]["status"] self.results[index]["failed"] = failedTune + self.results[index]["failed"] self.results[index]["successful"] = successfullyTune + self.results[index]["successful"] self.resultsstatus[status].append(index) def finishedChecking(self): print "finishedChecking" TuneTest.finishedChecking(self) if not self.results.has_key(self.currentlyTestedIndex): self.results[self.currentlyTestedIndex] = {"failed": [], "successful": [], "status": None, "internalstatus": None} if len(self.failedTune) > 0 and len(self.successfullyTune) > 0: self.changeProgressListStatus(self.currentlyTestedIndex, "with errors") 
self["witherrors_counter"].setText(str(int(self["witherrors_counter"].getText()) + 1)) self.addResult(self.currentlyTestedIndex, "with_errors", self.failedTune, self.successfullyTune) elif len(self.failedTune) == 0 and len(self.successfullyTune) == 0: self.changeProgressListStatus(self.currentlyTestedIndex, "not tested") self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1)) self.addResult(self.currentlyTestedIndex, "untestable", self.failedTune, self.successfullyTune) elif len(self.failedTune) > 0: self.changeProgressListStatus(self.currentlyTestedIndex, "failed") #self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune))) self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + 1)) self.addResult(self.currentlyTestedIndex, "failed", self.failedTune, self.successfullyTune) else: self.changeProgressListStatus(self.currentlyTestedIndex, "successful") #self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune))) self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + 1)) self.addResult(self.currentlyTestedIndex, "successful", self.failedTune, self.successfullyTune) #self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune))) #self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune))) #if len(self.failedTune) == 0 and len(self.successfullyTune) == 0: #self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1)) self.currentlyTestedIndex = self.getNextIndex() self.addProgressListItem(self.currentlyTestedIndex) if self.fillTransponderList(): self.run() else: self.running = False self["progress_list"].setIndex(0) print "results:", self.results print "resultsstatus:", self.resultsstatus if self.log: file = open("/media/hdd/diseqctester.log", "w") self.setResultType(ResultParser.TYPE_ALL) 
file.write(self.getTextualResult()) file.close() self.session.open(MessageBox, text=_("The results have been written to %s.") % "/media/hdd/diseqctester.log", type = MessageBox.TYPE_INFO) def go(self): self.running = True self["failed_counter"].setText("0") self["succeeded_counter"].setText("0") self["untestable_counter"].setText("0") self.currentlyTestedIndex = self.getFirstIndex() self.clearProgressList() self.addProgressListItem(self.currentlyTestedIndex) if self.fillTransponderList(): self.run() def keyCancel(self): self.close() def select(self): print "selectedIndex:", self["progress_list"].getCurrent()[0] if not self.running: index = self["progress_list"].getCurrent()[0] #self.setResultType(ResultParser.TYPE_BYORBPOS) #self.setResultParameter(index[2]) self.setResultType(ResultParser.TYPE_BYINDEX) self.setResultParameter(index) #self.setResultType(ResultParser.TYPE_ALL) self.session.open(TextBox, self.getTextualResult()) def selectionChanged(self): print "selection changed" if len(self.list) > 0 and not self.running: self["CmdText"].setText(_("Press OK to get further details for %s") % str(self["progress_list"].getCurrent()[1])) class DiseqcTesterTestTypeSelection(Screen, ConfigListScreen): def __init__(self, session, feid): Screen.__init__(self, session) # for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning self.skinName = ["DiseqcTesterTestTypeSelection", "Setup" ] self.setup_title = _("DiSEqC-tester settings") self.onChangedEntry = [ ] self.feid = feid self.list = [] ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry) self["actions"] = ActionMap(["SetupActions", "MenuActions"], { "cancel": self.keyCancel, "save": self.keyOK, "ok": self.keyOK, "menu": self.closeRecursive, }, -2) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("OK")) self.createSetup() self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): 
self.setTitle(self.setup_title) def createSetup(self): self.testtype = ConfigSelection(choices={"quick": _("Quick"), "random": _("Random"), "complete": _("Complete")}, default = "quick") self.testtypeEntry = getConfigListEntry(_("Test type"), self.testtype) self.list.append(self.testtypeEntry) self.loopsfailed = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "3") self.loopsfailedEntry = getConfigListEntry(_("Stop testing plane after # failed transponders"), self.loopsfailed) self.list.append(self.loopsfailedEntry) self.loopssuccessful = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "1") self.loopssuccessfulEntry = getConfigListEntry(_("Stop testing plane after # successful transponders"), self.loopssuccessful) self.list.append(self.loopssuccessfulEntry) self.log = ConfigYesNo(False) if harddiskmanager.HDDCount() > 0: self.logEntry = getConfigListEntry(_("Log results to harddisk"), self.log) self.list.append(self.logEntry) self["config"].list = self.list self["config"].l.setList(self.list) def keyOK(self): print self.testtype.value testtype = DiseqcTester.TEST_TYPE_QUICK if self.testtype.value == "quick": testtype = DiseqcTester.TEST_TYPE_QUICK elif self.testtype.value == "random": testtype = DiseqcTester.TEST_TYPE_RANDOM elif self.testtype.value == "complete": testtype = DiseqcTester.TEST_TYPE_COMPLETE self.session.open(DiseqcTester, feid = self.feid, test_type = testtype, loopsfailed = int(self.loopsfailed.value), loopssuccessful = int(self.loopssuccessful.value), log = self.log.value) def keyCancel(self): self.close() # for summary: def changedEntry(self): for x in self.onChangedEntry: x() def getCurrentEntry(self): return self["config"].getCurrent()[0] def getCurrentValue(self): return str(self["config"].getCurrent()[1].getText()) def createSummary(self): from Screens.Setup import 
SetupSummary return SetupSummary class DiseqcTesterNimSelection(NimSelection): skin = """ <screen position="160,123" size="400,330" title="Select a tuner"> <widget source="nimlist" render="Listbox" position="0,0" size="380,300" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (10, 5), size = (360, 30), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the nim name, MultiContentEntryText(pos = (50, 30), size = (320, 30), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is a description of the nim settings, ], "fonts": [gFont("Regular", 20), gFont("Regular", 15)], "itemHeight": 70 } </convert> </widget> </screen>""" def __init__(self, session, args = None): NimSelection.__init__(self, session) def setResultClass(self): #self.resultclass = DiseqcTester self.resultclass = DiseqcTesterTestTypeSelection def showNim(self, nim): nimConfig = nimmanager.getNimConfig(nim.slot) if nim.isCompatible("DVB-S"): if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends", "nothing"): return False if nimConfig.configMode.value == "simple": if nimConfig.diseqcMode.value == "positioner": return True return True return False def DiseqcTesterMain(session, **kwargs): session.open(DiseqcTesterNimSelection) def autostart(reason, **kwargs): resourcemanager.addResource("DiseqcTester", DiseqcTesterMain) def Plugins(**kwargs): return [ PluginDescriptor(name="DiSEqC Tester", description=_("Test DiSEqC settings"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=DiseqcTesterMain), PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart)]
gpl-2.0
mageia/dpark
examples/shortpath.py
16
1397
#!/usr/bin/env python2.6 import sys, os.path sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from dpark import Bagel, DparkContext from dpark.bagel import Vertex, Edge, BasicCombiner def to_vertex((id, lines)): outEdges = [Edge(tid, int(v)) for _, tid, v in lines] return (id, Vertex(id, sys.maxint, outEdges, True)) def compute(self, vs, agg, superstep): newValue = min(self.value, vs[0]) if vs else self.value if newValue != self.value: outbox = [(edge.target_id, newValue + edge.value) for edge in self.outEdges] else: outbox = [] return Vertex(self.id, newValue, self.outEdges, False), outbox if __name__ == '__main__': ctx = DparkContext() path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'graph.txt') lines = ctx.textFile(path).map(lambda line:line.split(' ')) vertices = lines.groupBy(lambda line:line[0]).map(to_vertex) startVertex = str(0) messages = ctx.makeRDD([(startVertex, 0)]) print 'read', vertices.count(), 'vertices and ', messages.count(), 'messages.' result = Bagel.run(ctx, vertices, messages, compute, BasicCombiner(min), numSplits=2) print 'Shortest path from %s to all vertices:' % startVertex for id, v in result.collect(): if v.value == sys.maxint: v.value = 'inf' print v.id, v.value
bsd-3-clause
CVML/cvxpy
cvxpy/tests/test_constraints.py
7
8743
""" Copyright 2013 Steven Diamond This file is part of CVXPY. CVXPY is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. CVXPY is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CVXPY. If not, see <http://www.gnu.org/licenses/>. """ from cvxpy.expressions.variables import Variable from cvxpy.constraints.second_order import SOC from cvxpy.tests.base_test import BaseTest import numpy as np class TestConstraints(BaseTest): """ Unit tests for the expression/expression module. """ def setUp(self): self.a = Variable(name='a') self.b = Variable(name='b') self.x = Variable(2, name='x') self.y = Variable(3, name='y') self.z = Variable(2, name='z') self.A = Variable(2,2,name='A') self.B = Variable(2,2,name='B') self.C = Variable(3,2,name='C') def test_constr_str(self): """Test string representations of the constraints. """ constr = self.x <= self.x self.assertEqual(repr(constr), "LeqConstraint(%s, %s)" % (repr(self.x), repr(self.x))) constr = self.x <= 2*self.x self.assertEqual(repr(constr), "LeqConstraint(%s, %s)" % (repr(self.x), repr(2*self.x))) constr = 2*self.x >= self.x self.assertEqual(repr(constr), "LeqConstraint(%s, %s)" % (repr(self.x), repr(2*self.x))) def test_eq_constraint(self): """Test the EqConstraint class. """ constr = self.x == self.z self.assertEqual(constr.name(), "x == z") self.assertEqual(constr.size, (2,1)) # self.assertItemsEqual(constr.variables().keys(), [self.x.id, self.z.id]) # Test value and dual_value. 
assert constr.dual_value is None assert constr.value is None self.x.save_value(2) self.z.save_value(2) assert constr.value self.x.save_value(3) assert not constr.value self.x.value = [2,1] self.z.value = [2,2] assert not constr.value self.assertItemsAlmostEqual(constr.violation, [0,1]) self.z.value = [2,1] assert constr.value self.assertItemsAlmostEqual(constr.violation, [0,0]) with self.assertRaises(Exception) as cm: (self.x == self.y) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Test copy with args=None copy = constr.copy() self.assertTrue(type(copy) is type(constr)) # A new object is constructed, so copy.args == constr.args but copy.args # is not constr.args. self.assertEqual(copy.args, constr.args) self.assertFalse(copy.args is constr.args) # Test copy with new args copy = constr.copy(args=[self.A, self.B]) self.assertTrue(type(copy) is type(constr)) self.assertTrue(copy.args[0] is self.A) self.assertTrue(copy.args[1] is self.B) def test_leq_constraint(self): """Test the LeqConstraint class. """ constr = self.x <= self.z self.assertEqual(constr.name(), "x <= z") self.assertEqual(constr.size, (2, 1)) # Test value and dual_value. assert constr.dual_value is None assert constr.value is None self.x.save_value(1) self.z.save_value(2) assert constr.value self.x.save_value(3) assert not constr.value # self.assertItemsEqual(constr.variables().keys(), [self.x.id, self.z.id]) self.x.value = [2,1] self.z.value = [2,0] assert not constr.value self.assertItemsAlmostEqual(constr.violation, [0,1]) self.z.value = [2,2] assert constr.value self.assertItemsAlmostEqual(constr.violation, [0,0]) with self.assertRaises(Exception) as cm: (self.x <= self.y) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Test copy with args=None copy = constr.copy() self.assertTrue(type(copy) is type(constr)) # A new object is constructed, so copy.args == constr.args but copy.args # is not constr.args. 
self.assertEqual(copy.args, constr.args) self.assertFalse(copy.args is constr.args) # Test copy with new args copy = constr.copy(args=[self.A, self.B]) self.assertTrue(type(copy) is type(constr)) self.assertTrue(copy.args[0] is self.A) self.assertTrue(copy.args[1] is self.B) def test_psd_constraint(self): """Test the PSD constraint <<. """ constr = self.A >> self.B self.assertEqual(constr.name(), "A >> B") self.assertEqual(constr.size, (2, 2)) # Test value and dual_value. assert constr.dual_value is None assert constr.value is None self.A.save_value(np.matrix("2 -1; 1 2")) self.B.save_value(np.matrix("1 0; 0 1")) assert constr.value self.assertAlmostEqual(constr.violation, 0) self.B.save_value(np.matrix("3 0; 0 3")) assert not constr.value self.assertAlmostEqual(constr.violation, 1) with self.assertRaises(Exception) as cm: (self.x >> self.y) self.assertEqual(str(cm.exception), "Non-square matrix in positive definite constraint.") # Test copy with args=None copy = constr.copy() self.assertTrue(type(copy) is type(constr)) # A new object is constructed, so copy.args == constr.args but copy.args # is not constr.args. self.assertEqual(copy.args, constr.args) self.assertFalse(copy.args is constr.args) # Test copy with new args copy = constr.copy(args=[self.B, self.A]) self.assertTrue(type(copy) is type(constr)) self.assertTrue(copy.args[0] is self.B) self.assertTrue(copy.args[1] is self.A) def test_nsd_constraint(self): """Test the PSD constraint <<. """ constr = self.A << self.B self.assertEqual(constr.name(), "B >> A") self.assertEqual(constr.size, (2, 2)) # Test value and dual_value. 
assert constr.dual_value is None assert constr.value is None self.B.save_value(np.matrix("2 -1; 1 2")) self.A.save_value(np.matrix("1 0; 0 1")) assert constr.value self.A.save_value(np.matrix("3 0; 0 3")) assert not constr.value with self.assertRaises(Exception) as cm: (self.x << self.y) self.assertEqual(str(cm.exception), "Non-square matrix in positive definite constraint.") def test_lt(self): """Test the < operator. """ constr = self.x < self.z self.assertEqual(constr.name(), "x <= z") self.assertEqual(constr.size, (2, 1)) with self.assertRaises(Exception) as cm: (self.x < self.y) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") def test_geq(self): """Test the >= operator. """ constr = self.z >= self.x self.assertEqual(constr.name(), "x <= z") self.assertEqual(constr.size, (2, 1)) with self.assertRaises(Exception) as cm: (self.y >= self.x) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") def test_gt(self): """Test the > operator. """ constr = self.z > self.x self.assertEqual(constr.name(), "x <= z") self.assertEqual(constr.size, (2, 1)) with self.assertRaises(Exception) as cm: (self.y > self.x) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Test the SOC class. def test_soc_constraint(self): exp = self.x + self.z scalar_exp = self.a + self.b constr = SOC(scalar_exp, [exp]) self.assertEqual(constr.size, (3,1)) def test_chained_constraints(self): """Tests that chaining constraints raises an error. """ with self.assertRaises(Exception) as cm: (self.z <= self.x <= 1) self.assertEqual(str(cm.exception), "Cannot evaluate the truth value of a constraint.") with self.assertRaises(Exception) as cm: (self.x == self.z == 1) self.assertEqual(str(cm.exception), "Cannot evaluate the truth value of a constraint.")
gpl-3.0
40423126/2016fallcadp_ag9
plugin/liquid_tags/test_audio.py
273
1456
from . import audio import pytest import re @pytest.mark.parametrize('input,expected', [ ('http://foo.bar https://bar.foo', ('http://foo.bar', 'https://bar.foo', None)), ('http://test.foo', ('http://test.foo', None, None)), ('https://test.foo', ('https://test.foo', None, None)), ('http://foo.foo https://bar.bar http://zonk.zonk', ('http://foo.foo', 'https://bar.bar', 'http://zonk.zonk')) ]) def test_regex(input, expected): assert re.match(audio.AUDIO, input).groups() == expected @pytest.mark.parametrize('input,expected', [ ('http://foo.foo/foo.mp3', ('<audio controls>' '<source src="http://foo.foo/foo.mp3" type="audio/mpeg">' 'Your browser does not support the audio element.</audio>')), ('https://foo.foo/foo.ogg http://bar.bar/bar.opus', ('<audio controls>' '<source src="https://foo.foo/foo.ogg" type="audio/ogg">' '<source src="http://bar.bar/bar.opus" type="audio/ogg">' 'Your browser does not support the audio element.</audio>')), ('http://1.de/1.wav http://2.de/2.mp4 http://3.de/3.ogg', ('<audio controls>' '<source src="http://1.de/1.wav" type="audio/wav">' '<source src="http://2.de/2.mp4" type="audio/mp4">' '<source src="http://3.de/3.ogg" type="audio/ogg">' 'Your browser does not support the audio element.</audio>')) ]) def test_create_html(input, expected): assert audio.create_html(input) == expected
agpl-3.0
regular/pyglet-avbin-optimizations
tools/al_info.py
4
2207
#!/usr/bin/env python '''Print OpenAL driver information. Options: -d <device> Optionally specify device to query. ''' __docformat__ = 'restructuredtext' __version__ = '$Id$' import ctypes import optparse import sys from pyglet.media.drivers import openal from pyglet.media.drivers.openal import lib_openal as al from pyglet.media.drivers.openal import lib_alc as alc def split_nul_strings(s): # NUL-separated list of strings, double-NUL-terminated. nul = False i = 0 while True: if s[i] == '\0': if nul: break else: nul = True else: nul = False i += 1 s = s[:i - 1] return s.split('\0') if __name__ == '__main__': op = optparse.OptionParser() op.add_option('-d', '--device', dest='device', help='use device DEVICE', metavar='DEVICE') (options, args) = op.parse_args(sys.argv[1:]) default_device = ctypes.cast( alc.alcGetString(None, alc.ALC_DEFAULT_DEVICE_SPECIFIER), ctypes.c_char_p).value capture_default_device = ctypes.cast( alc.alcGetString(None, alc.ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER), ctypes.c_char_p).value print 'Default device: %s' % default_device print 'Default capture device: %s' % capture_default_device if alc.alcIsExtensionPresent(None, 'ALC_ENUMERATION_EXT'): # Hmm, actually not allowed to pass NULL to alcIsExtension present.. # how is this supposed to work? devices = split_nul_strings( alc.alcGetString(None, alc.ALC_DEVICE_SPECIFIER)) capture_devices = split_nul_strings( alc.alcGetString(None, alc.ALC_CAPTURE_DEVICE_SPECIFIER)) print 'Devices: %s' % ', '.join(devices) print 'Capture devices: %s' % ', '.join(capture_devices) print if options.device: print 'Using device "%s"...' % options.device openal.driver_init(options.device) else: print 'Using default device...' openal.driver_init() print 'OpenAL version %d.%d' % openal.get_version() print 'Extensions: %s' % ', '.join(openal.get_extensions())
bsd-3-clause
wlerin/streamlink
src/streamlink/plugins/trtspor.py
4
1230
from __future__ import print_function import re from streamlink.plugin import Plugin from streamlink.plugin.api import validate from streamlink.stream import AkamaiHDStream from streamlink.stream import HDSStream from streamlink.stream import HLSStream class TRTSpor(Plugin): """ Support for trtsport.com a Turkish Sports Broadcaster """ url_re = re.compile(r"https?://(?:www.)?trtspor.com/canli-yayin-izle/.+/?") f4mm_re = re.compile(r'''(?P<q>["'])(?P<url>http[^"']+?.f4m)(?P=q),''') m3u8_re = re.compile(r'''(?P<q>["'])(?P<url>http[^"']+?.m3u8)(?P=q),''') @classmethod def can_handle_url(cls, url): return cls.url_re.match(url) is not None def _get_streams(self): res = self.session.http.get(self.url) url_m = self.m3u8_re.search(res.text) hls_url = url_m and url_m.group("url") if hls_url: for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): yield s f4m_m = self.f4mm_re.search(res.text) f4m_url = f4m_m and f4m_m.group("url") if f4m_url: for n, s in HDSStream.parse_manifest(self.session, f4m_url).items(): yield n, s __plugin__ = TRTSpor
bsd-2-clause
ekwoodrich/nirha
nirhalib/venv/lib/python2.7/site-packages/flask/testsuite/helpers.py
405
21973
# -*- coding: utf-8 -*- """ flask.testsuite.helpers ~~~~~~~~~~~~~~~~~~~~~~~ Various helpers. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import os import flask import unittest from logging import StreamHandler from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr from werkzeug.http import parse_cache_control_header, parse_options_header from flask._compat import StringIO, text_type def has_encoding(name): try: import codecs codecs.lookup(name) return True except LookupError: return False class JSONTestCase(FlaskTestCase): def test_json_bad_requests(self): app = flask.Flask(__name__) @app.route('/json', methods=['POST']) def return_json(): return flask.jsonify(foo=text_type(flask.request.get_json())) c = app.test_client() rv = c.post('/json', data='malformed', content_type='application/json') self.assert_equal(rv.status_code, 400) def test_json_body_encoding(self): app = flask.Flask(__name__) app.testing = True @app.route('/') def index(): return flask.request.get_json() c = app.test_client() resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'), content_type='application/json; charset=iso-8859-15') self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8')) def test_jsonify(self): d = dict(a=23, b=42, c=[1, 2, 3]) app = flask.Flask(__name__) @app.route('/kw') def return_kwargs(): return flask.jsonify(**d) @app.route('/dict') def return_dict(): return flask.jsonify(d) c = app.test_client() for url in '/kw', '/dict': rv = c.get(url) self.assert_equal(rv.mimetype, 'application/json') self.assert_equal(flask.json.loads(rv.data), d) def test_json_as_unicode(self): app = flask.Flask(__name__) app.config['JSON_AS_ASCII'] = True with app.app_context(): rv = flask.json.dumps(u'\N{SNOWMAN}') self.assert_equal(rv, '"\\u2603"') app.config['JSON_AS_ASCII'] = False with app.app_context(): rv = flask.json.dumps(u'\N{SNOWMAN}') self.assert_equal(rv, u'"\u2603"') def test_json_attr(self): app = 
flask.Flask(__name__) @app.route('/add', methods=['POST']) def add(): json = flask.request.get_json() return text_type(json['a'] + json['b']) c = app.test_client() rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}), content_type='application/json') self.assert_equal(rv.data, b'3') def test_template_escaping(self): app = flask.Flask(__name__) render = flask.render_template_string with app.test_request_context(): rv = flask.json.htmlsafe_dumps('</script>') self.assert_equal(rv, u'"\\u003c/script\\u003e"') self.assert_equal(type(rv), text_type) rv = render('{{ "</script>"|tojson }}') self.assert_equal(rv, '"\\u003c/script\\u003e"') rv = render('{{ "<\0/script>"|tojson }}') self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"') rv = render('{{ "<!--<script>"|tojson }}') self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"') rv = render('{{ "&"|tojson }}') self.assert_equal(rv, '"\\u0026"') rv = render('{{ "\'"|tojson }}') self.assert_equal(rv, '"\\u0027"') rv = render("<a ng-data='{{ data|tojson }}'></a>", data={'x': ["foo", "bar", "baz'"]}) self.assert_equal(rv, '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>') def test_json_customization(self): class X(object): def __init__(self, val): self.val = val class MyEncoder(flask.json.JSONEncoder): def default(self, o): if isinstance(o, X): return '<%d>' % o.val return flask.json.JSONEncoder.default(self, o) class MyDecoder(flask.json.JSONDecoder): def __init__(self, *args, **kwargs): kwargs.setdefault('object_hook', self.object_hook) flask.json.JSONDecoder.__init__(self, *args, **kwargs) def object_hook(self, obj): if len(obj) == 1 and '_foo' in obj: return X(obj['_foo']) return obj app = flask.Flask(__name__) app.testing = True app.json_encoder = MyEncoder app.json_decoder = MyDecoder @app.route('/', methods=['POST']) def index(): return flask.json.dumps(flask.request.get_json()['x']) c = app.test_client() rv = c.post('/', data=flask.json.dumps({ 'x': {'_foo': 42} }), content_type='application/json') 
self.assertEqual(rv.data, b'"<42>"') def test_modified_url_encoding(self): class ModifiedRequest(flask.Request): url_charset = 'euc-kr' app = flask.Flask(__name__) app.testing = True app.request_class = ModifiedRequest app.url_map.charset = 'euc-kr' @app.route('/') def index(): return flask.request.args['foo'] rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr')) self.assert_equal(rv.status_code, 200) self.assert_equal(rv.data, u'정상처리'.encode('utf-8')) if not has_encoding('euc-kr'): test_modified_url_encoding = None def test_json_key_sorting(self): app = flask.Flask(__name__) app.testing = True self.assert_equal(app.config['JSON_SORT_KEYS'], True) d = dict.fromkeys(range(20), 'foo') @app.route('/') def index(): return flask.jsonify(values=d) c = app.test_client() rv = c.get('/') lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()] self.assert_equal(lines, [ '{', '"values": {', '"0": "foo",', '"1": "foo",', '"2": "foo",', '"3": "foo",', '"4": "foo",', '"5": "foo",', '"6": "foo",', '"7": "foo",', '"8": "foo",', '"9": "foo",', '"10": "foo",', '"11": "foo",', '"12": "foo",', '"13": "foo",', '"14": "foo",', '"15": "foo",', '"16": "foo",', '"17": "foo",', '"18": "foo",', '"19": "foo"', '}', '}' ]) class SendfileTestCase(FlaskTestCase): def test_send_file_regular(self): app = flask.Flask(__name__) with app.test_request_context(): rv = flask.send_file('static/index.html') self.assert_true(rv.direct_passthrough) self.assert_equal(rv.mimetype, 'text/html') with app.open_resource('static/index.html') as f: rv.direct_passthrough = False self.assert_equal(rv.data, f.read()) rv.close() def test_send_file_xsendfile(self): app = flask.Flask(__name__) app.use_x_sendfile = True with app.test_request_context(): rv = flask.send_file('static/index.html') self.assert_true(rv.direct_passthrough) self.assert_in('x-sendfile', rv.headers) self.assert_equal(rv.headers['x-sendfile'], os.path.join(app.root_path, 'static/index.html')) self.assert_equal(rv.mimetype, 
'text/html') rv.close() def test_send_file_object(self): app = flask.Flask(__name__) with catch_warnings() as captured: with app.test_request_context(): f = open(os.path.join(app.root_path, 'static/index.html')) rv = flask.send_file(f) rv.direct_passthrough = False with app.open_resource('static/index.html') as f: self.assert_equal(rv.data, f.read()) self.assert_equal(rv.mimetype, 'text/html') rv.close() # mimetypes + etag self.assert_equal(len(captured), 2) app.use_x_sendfile = True with catch_warnings() as captured: with app.test_request_context(): f = open(os.path.join(app.root_path, 'static/index.html')) rv = flask.send_file(f) self.assert_equal(rv.mimetype, 'text/html') self.assert_in('x-sendfile', rv.headers) self.assert_equal(rv.headers['x-sendfile'], os.path.join(app.root_path, 'static/index.html')) rv.close() # mimetypes + etag self.assert_equal(len(captured), 2) app.use_x_sendfile = False with app.test_request_context(): with catch_warnings() as captured: f = StringIO('Test') rv = flask.send_file(f) rv.direct_passthrough = False self.assert_equal(rv.data, b'Test') self.assert_equal(rv.mimetype, 'application/octet-stream') rv.close() # etags self.assert_equal(len(captured), 1) with catch_warnings() as captured: f = StringIO('Test') rv = flask.send_file(f, mimetype='text/plain') rv.direct_passthrough = False self.assert_equal(rv.data, b'Test') self.assert_equal(rv.mimetype, 'text/plain') rv.close() # etags self.assert_equal(len(captured), 1) app.use_x_sendfile = True with catch_warnings() as captured: with app.test_request_context(): f = StringIO('Test') rv = flask.send_file(f) self.assert_not_in('x-sendfile', rv.headers) rv.close() # etags self.assert_equal(len(captured), 1) def test_attachment(self): app = flask.Flask(__name__) with catch_warnings() as captured: with app.test_request_context(): f = open(os.path.join(app.root_path, 'static/index.html')) rv = flask.send_file(f, as_attachment=True) value, options = 
parse_options_header(rv.headers['Content-Disposition']) self.assert_equal(value, 'attachment') rv.close() # mimetypes + etag self.assert_equal(len(captured), 2) with app.test_request_context(): self.assert_equal(options['filename'], 'index.html') rv = flask.send_file('static/index.html', as_attachment=True) value, options = parse_options_header(rv.headers['Content-Disposition']) self.assert_equal(value, 'attachment') self.assert_equal(options['filename'], 'index.html') rv.close() with app.test_request_context(): rv = flask.send_file(StringIO('Test'), as_attachment=True, attachment_filename='index.txt', add_etags=False) self.assert_equal(rv.mimetype, 'text/plain') value, options = parse_options_header(rv.headers['Content-Disposition']) self.assert_equal(value, 'attachment') self.assert_equal(options['filename'], 'index.txt') rv.close() def test_static_file(self): app = flask.Flask(__name__) # default cache timeout is 12 hours with app.test_request_context(): # Test with static file handler. rv = app.send_static_file('index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 12 * 60 * 60) rv.close() # Test again with direct use of send_file utility. rv = flask.send_file('static/index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 12 * 60 * 60) rv.close() app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600 with app.test_request_context(): # Test with static file handler. rv = app.send_static_file('index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 3600) rv.close() # Test again with direct use of send_file utility. 
rv = flask.send_file('static/index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 3600) rv.close() class StaticFileApp(flask.Flask): def get_send_file_max_age(self, filename): return 10 app = StaticFileApp(__name__) with app.test_request_context(): # Test with static file handler. rv = app.send_static_file('index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 10) rv.close() # Test again with direct use of send_file utility. rv = flask.send_file('static/index.html') cc = parse_cache_control_header(rv.headers['Cache-Control']) self.assert_equal(cc.max_age, 10) rv.close() class LoggingTestCase(FlaskTestCase): def test_logger_cache(self): app = flask.Flask(__name__) logger1 = app.logger self.assert_true(app.logger is logger1) self.assert_equal(logger1.name, __name__) app.logger_name = __name__ + '/test_logger_cache' self.assert_true(app.logger is not logger1) def test_debug_log(self): app = flask.Flask(__name__) app.debug = True @app.route('/') def index(): app.logger.warning('the standard library is dead') app.logger.debug('this is a debug statement') return '' @app.route('/exc') def exc(): 1 // 0 with app.test_client() as c: with catch_stderr() as err: c.get('/') out = err.getvalue() self.assert_in('WARNING in helpers [', out) self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out) self.assert_in('the standard library is dead', out) self.assert_in('this is a debug statement', out) with catch_stderr() as err: try: c.get('/exc') except ZeroDivisionError: pass else: self.assert_true(False, 'debug log ate the exception') def test_debug_log_override(self): app = flask.Flask(__name__) app.debug = True app.logger_name = 'flask_tests/test_debug_log_override' app.logger.level = 10 self.assert_equal(app.logger.level, 10) def test_exception_logging(self): out = StringIO() app = flask.Flask(__name__) app.logger_name = 'flask_tests/test_exception_logging' 
app.logger.addHandler(StreamHandler(out)) @app.route('/') def index(): 1 // 0 rv = app.test_client().get('/') self.assert_equal(rv.status_code, 500) self.assert_in(b'Internal Server Error', rv.data) err = out.getvalue() self.assert_in('Exception on / [GET]', err) self.assert_in('Traceback (most recent call last):', err) self.assert_in('1 // 0', err) self.assert_in('ZeroDivisionError:', err) def test_processor_exceptions(self): app = flask.Flask(__name__) @app.before_request def before_request(): if trigger == 'before': 1 // 0 @app.after_request def after_request(response): if trigger == 'after': 1 // 0 return response @app.route('/') def index(): return 'Foo' @app.errorhandler(500) def internal_server_error(e): return 'Hello Server Error', 500 for trigger in 'before', 'after': rv = app.test_client().get('/') self.assert_equal(rv.status_code, 500) self.assert_equal(rv.data, b'Hello Server Error') def test_url_for_with_anchor(self): app = flask.Flask(__name__) @app.route('/') def index(): return '42' with app.test_request_context(): self.assert_equal(flask.url_for('index', _anchor='x y'), '/#x%20y') def test_url_for_with_scheme(self): app = flask.Flask(__name__) @app.route('/') def index(): return '42' with app.test_request_context(): self.assert_equal(flask.url_for('index', _external=True, _scheme='https'), 'https://localhost/') def test_url_for_with_scheme_not_external(self): app = flask.Flask(__name__) @app.route('/') def index(): return '42' with app.test_request_context(): self.assert_raises(ValueError, flask.url_for, 'index', _scheme='https') def test_url_with_method(self): from flask.views import MethodView app = flask.Flask(__name__) class MyView(MethodView): def get(self, id=None): if id is None: return 'List' return 'Get %d' % id def post(self): return 'Create' myview = MyView.as_view('myview') app.add_url_rule('/myview/', methods=['GET'], view_func=myview) app.add_url_rule('/myview/<int:id>', methods=['GET'], view_func=myview) 
app.add_url_rule('/myview/create', methods=['POST'], view_func=myview) with app.test_request_context(): self.assert_equal(flask.url_for('myview', _method='GET'), '/myview/') self.assert_equal(flask.url_for('myview', id=42, _method='GET'), '/myview/42') self.assert_equal(flask.url_for('myview', _method='POST'), '/myview/create') class NoImportsTestCase(FlaskTestCase): """Test Flasks are created without import. Avoiding ``__import__`` helps create Flask instances where there are errors at import time. Those runtime errors will be apparent to the user soon enough, but tools which build Flask instances meta-programmatically benefit from a Flask which does not ``__import__``. Instead of importing to retrieve file paths or metadata on a module or package, use the pkgutil and imp modules in the Python standard library. """ def test_name_with_import_error(self): try: flask.Flask('importerror') except NotImplementedError: self.fail('Flask(import_name) is importing import_name.') class StreamingTestCase(FlaskTestCase): def test_streaming_with_context(self): app = flask.Flask(__name__) app.testing = True @app.route('/') def index(): def generate(): yield 'Hello ' yield flask.request.args['name'] yield '!' return flask.Response(flask.stream_with_context(generate())) c = app.test_client() rv = c.get('/?name=World') self.assertEqual(rv.data, b'Hello World!') def test_streaming_with_context_as_decorator(self): app = flask.Flask(__name__) app.testing = True @app.route('/') def index(): @flask.stream_with_context def generate(): yield 'Hello ' yield flask.request.args['name'] yield '!' 
return flask.Response(generate()) c = app.test_client() rv = c.get('/?name=World') self.assertEqual(rv.data, b'Hello World!') def test_streaming_with_context_and_custom_close(self): app = flask.Flask(__name__) app.testing = True called = [] class Wrapper(object): def __init__(self, gen): self._gen = gen def __iter__(self): return self def close(self): called.append(42) def __next__(self): return next(self._gen) next = __next__ @app.route('/') def index(): def generate(): yield 'Hello ' yield flask.request.args['name'] yield '!' return flask.Response(flask.stream_with_context( Wrapper(generate()))) c = app.test_client() rv = c.get('/?name=World') self.assertEqual(rv.data, b'Hello World!') self.assertEqual(called, [42]) def suite(): suite = unittest.TestSuite() if flask.json_available: suite.addTest(unittest.makeSuite(JSONTestCase)) suite.addTest(unittest.makeSuite(SendfileTestCase)) suite.addTest(unittest.makeSuite(LoggingTestCase)) suite.addTest(unittest.makeSuite(NoImportsTestCase)) suite.addTest(unittest.makeSuite(StreamingTestCase)) return suite
apache-2.0
ericfc/django
tests/model_fields/test_durationfield.py
296
2724
import datetime import json from django import forms from django.core import exceptions, serializers from django.db import models from django.test import SimpleTestCase, TestCase from .models import DurationModel, NullDurationModel class TestSaveLoad(TestCase): def test_simple_roundtrip(self): duration = datetime.timedelta(days=123, seconds=123, microseconds=123) DurationModel.objects.create(field=duration) loaded = DurationModel.objects.get() self.assertEqual(loaded.field, duration) def test_create_empty(self): NullDurationModel.objects.create() loaded = NullDurationModel.objects.get() self.assertEqual(loaded.field, None) class TestQuerying(TestCase): @classmethod def setUpTestData(cls): cls.objs = [ DurationModel.objects.create(field=datetime.timedelta(days=1)), DurationModel.objects.create(field=datetime.timedelta(seconds=1)), DurationModel.objects.create(field=datetime.timedelta(seconds=-1)), ] def test_exact(self): self.assertSequenceEqual( DurationModel.objects.filter(field=datetime.timedelta(days=1)), [self.objs[0]] ) def test_gt(self): self.assertSequenceEqual( DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)), [self.objs[0], self.objs[1]] ) class TestSerialization(SimpleTestCase): test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]' def test_dumping(self): instance = DurationModel(field=datetime.timedelta(days=1, hours=1)) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1)) class TestValidation(SimpleTestCase): def test_invalid_string(self): field = models.DurationField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('not a datetime', None) self.assertEqual(cm.exception.code, 'invalid') self.assertEqual( cm.exception.message % 
cm.exception.params, "'not a datetime' value has an invalid format. " "It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format." ) class TestFormField(SimpleTestCase): # Tests for forms.DurationField are in the forms_tests app. def test_formfield(self): field = models.DurationField() self.assertIsInstance(field.formfield(), forms.DurationField)
bsd-3-clause
jordanemedlock/psychtruths
temboo/Library/NYTimes/CampaignFinance/PresidentialCampaigns/PresidentialCandidateTotals.py
5
3920
# -*- coding: utf-8 -*- ############################################################################### # # PresidentialCandidateTotals # Retrieves the total receipts and disbursements for all presidential candidates for a particular campaign cycle. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class PresidentialCandidateTotals(Choreography): def __init__(self, temboo_session): """ Create a new instance of the PresidentialCandidateTotals Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. 
""" super(PresidentialCandidateTotals, self).__init__(temboo_session, '/Library/NYTimes/CampaignFinance/PresidentialCampaigns/PresidentialCandidateTotals') def new_input_set(self): return PresidentialCandidateTotalsInputSet() def _make_result_set(self, result, path): return PresidentialCandidateTotalsResultSet(result, path) def _make_execution(self, session, exec_id, path): return PresidentialCandidateTotalsChoreographyExecution(session, exec_id, path) class PresidentialCandidateTotalsInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the PresidentialCandidateTotals Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by NY Times.) """ super(PresidentialCandidateTotalsInputSet, self)._set_input('APIKey', value) def set_CampaignCycle(self, value): """ Set the value of the CampaignCycle input for this Choreo. ((required, integer) Enter the campaign cycle year in YYYY format. This must be an even year.) """ super(PresidentialCandidateTotalsInputSet, self)._set_input('CampaignCycle', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. ((optional, string) Enter json or xml. Default is json.) """ super(PresidentialCandidateTotalsInputSet, self)._set_input('ResponseFormat', value) class PresidentialCandidateTotalsResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the PresidentialCandidateTotals Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from the NY Times API corresponds to the setting (json, or xml) entered in the ResponseFormat variable. Default is set to json.) 
""" return self._output.get('Response', None) class PresidentialCandidateTotalsChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return PresidentialCandidateTotalsResultSet(response, path)
apache-2.0
andysim/psi4
psi4/driver/qcdb/exceptions.py
3
3684
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2017 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # """Module with non-generic exceptions classes.""" from __future__ import print_function class QcdbException(Exception): """Error class for QCDB.""" pass class FeatureNotImplemented(QcdbException): """Error called for functions defined but not yet implemented. Also for functions defined that will never be implemented. """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nQcdbException: Feature %s is not yet implemented.\n\n' % (msg)) class ValidationError(QcdbException): """Error called for problems with syntax input file. Prints error message *msg* to standard output stream. """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nQcdbException: %s\n\n' % (msg)) class IncompleteAtomError(QcdbException): """Error raised when not all variables in an atom specification have been defined at compute time. May be a temporary situation so message not printed but appears as traceback when error persists. 
""" def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg class ParsingValidationError(QcdbException): """Error called for problems with syntax from a QC output file. Prints error message *msg* to standard output stream. """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nQcdbException: %s\n\n' % (msg)) class FragmentCountError(QcdbException): """Error called molecule has wrong number of fragments for method. Prints error message *msg* to standard output stream. """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg #print('\nQcdbException: %s\n\n' % (msg)) class BasisSetFileNotFound(QcdbException): """ """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nQcdbException BasisSetFileNotFound: %s\n\n' % (msg)) class BasisSetNotFound(QcdbException): """ """ def __init__(self, msg, silent=False): QcdbException.__init__(self, msg) self.msg = msg if not silent: print('\nQcdbException BasisSetNotFound: %s\n\n' % (msg)) class BasisSetNotDefined(QcdbException): """ """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nQcdbException BasisSetNotDefined: %s\n\n' % (msg)) class Dftd3Error(QcdbException): """ """ def __init__(self, msg): QcdbException.__init__(self, msg) self.msg = msg print('\nDftd3Error: %s\n\n' % (msg))
gpl-2.0
sillydan1/WhatEverEngine
openglcsharp/Lib/encodings/mac_latin2.py
647
8565
""" Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py. Written by Marc-Andre Lemburg ([email protected]). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. (c) Copyright 2000 Guido van Rossum. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_map) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_map)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-latin2', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x0081: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON 0x0082: 0x0101, # LATIN SMALL LETTER A WITH MACRON 0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x0084: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK 0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x0088: 0x0105, # LATIN SMALL LETTER A WITH OGONEK 0x0089: 0x010c, # LATIN CAPITAL LETTER C WITH CARON 0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x008b: 0x010d, # LATIN SMALL LETTER C WITH CARON 0x008c: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE 0x008d: 0x0107, # LATIN SMALL LETTER C WITH ACUTE 0x008e: 
0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE 0x0090: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE 0x0091: 0x010e, # LATIN CAPITAL LETTER D WITH CARON 0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x0093: 0x010f, # LATIN SMALL LETTER D WITH CARON 0x0094: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON 0x0095: 0x0113, # LATIN SMALL LETTER E WITH MACRON 0x0096: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x0098: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE 0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x009d: 0x011a, # LATIN CAPITAL LETTER E WITH CARON 0x009e: 0x011b, # LATIN SMALL LETTER E WITH CARON 0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x00a0: 0x2020, # DAGGER 0x00a1: 0x00b0, # DEGREE SIGN 0x00a2: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK 0x00a4: 0x00a7, # SECTION SIGN 0x00a5: 0x2022, # BULLET 0x00a6: 0x00b6, # PILCROW SIGN 0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S 0x00a8: 0x00ae, # REGISTERED SIGN 0x00aa: 0x2122, # TRADE MARK SIGN 0x00ab: 0x0119, # LATIN SMALL LETTER E WITH OGONEK 0x00ac: 0x00a8, # DIAERESIS 0x00ad: 0x2260, # NOT EQUAL TO 0x00ae: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA 0x00af: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK 0x00b0: 0x012f, # LATIN SMALL LETTER I WITH OGONEK 0x00b1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON 0x00b2: 0x2264, # LESS-THAN OR EQUAL TO 0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO 0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON 0x00b5: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA 0x00b6: 0x2202, # PARTIAL DIFFERENTIAL 0x00b7: 0x2211, # N-ARY SUMMATION 0x00b8: 0x0142, # LATIN SMALL LETTER L WITH STROKE 0x00b9: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA 0x00ba: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA 0x00bb: 0x013d, # LATIN 
CAPITAL LETTER L WITH CARON 0x00bc: 0x013e, # LATIN SMALL LETTER L WITH CARON 0x00bd: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE 0x00be: 0x013a, # LATIN SMALL LETTER L WITH ACUTE 0x00bf: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA 0x00c0: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA 0x00c1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE 0x00c2: 0x00ac, # NOT SIGN 0x00c3: 0x221a, # SQUARE ROOT 0x00c4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE 0x00c5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON 0x00c6: 0x2206, # INCREMENT 0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00c9: 0x2026, # HORIZONTAL ELLIPSIS 0x00ca: 0x00a0, # NO-BREAK SPACE 0x00cb: 0x0148, # LATIN SMALL LETTER N WITH CARON 0x00cc: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00ce: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE 0x00cf: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON 0x00d0: 0x2013, # EN DASH 0x00d1: 0x2014, # EM DASH 0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK 0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x00d6: 0x00f7, # DIVISION SIGN 0x00d7: 0x25ca, # LOZENGE 0x00d8: 0x014d, # LATIN SMALL LETTER O WITH MACRON 0x00d9: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE 0x00da: 0x0155, # LATIN SMALL LETTER R WITH ACUTE 0x00db: 0x0158, # LATIN CAPITAL LETTER R WITH CARON 0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK 0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 0x00de: 0x0159, # LATIN SMALL LETTER R WITH CARON 0x00df: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA 0x00e0: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA 0x00e1: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK 0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x00e4: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00e5: 0x015a, # LATIN CAPITAL LETTER S 
WITH ACUTE 0x00e6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE 0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE 0x00e8: 0x0164, # LATIN CAPITAL LETTER T WITH CARON 0x00e9: 0x0165, # LATIN SMALL LETTER T WITH CARON 0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE 0x00eb: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON 0x00ec: 0x017e, # LATIN SMALL LETTER Z WITH CARON 0x00ed: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON 0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00f0: 0x016b, # LATIN SMALL LETTER U WITH MACRON 0x00f1: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE 0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE 0x00f3: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE 0x00f4: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE 0x00f5: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE 0x00f6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK 0x00f7: 0x0173, # LATIN SMALL LETTER U WITH OGONEK 0x00f8: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE 0x00f9: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE 0x00fa: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA 0x00fb: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x00fc: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE 0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x00fe: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA 0x00ff: 0x02c7, # CARON }) ### Encoding Map encoding_map = codecs.make_encoding_map(decoding_map)
apache-2.0
vinilios/synnefo
snf-cyclades-app/synnefo/logic/management/commands/port-create.py
9
6075
# Copyright (C) 2010-2014 GRNET S.A. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from optparse import make_option from django.core.management.base import CommandError from synnefo.api import util from synnefo.management import common, pprint from snf_django.management.utils import parse_bool from snf_django.management.commands import SynnefoCommand from synnefo.logic import servers HELP_MSG = """Create a new port. Connect a server/router to a network by creating a new port. If 'floating_ip' option is used, the specified floating IP will be assigned to the new port. 
Otherwise, the port will get an IP address for each Subnet that is associated with the network.""" class Command(SynnefoCommand): help = HELP_MSG option_list = SynnefoCommand.option_list + ( make_option( "--name", dest="name", default=None, help="Name of the port."), make_option( "--user", dest="user_id", default=None, help="UUID of the owner of the Port."), make_option( "--network", dest="network_id", default=None, help="The ID of the network where the port will be created."), make_option( "--server", dest="server_id", default=None, help="The ID of the server that the port will be connected to."), #make_option( # "--router", # dest="router_id", # default=None, # help="The ID of the router that the port will be connected to."), make_option( "--floating-ip", dest="floating_ip_id", default=None, help="The ID of the floating IP to use for the port."), make_option( "--ipv4-address", dest="ipv4_address", default=None, help="Specify IPv4 address for the new port."), make_option( "--security-groups", dest="security-groups", default=None, help="Comma separated list of Security Group IDs to associate" " with the port."), make_option( "--wait", dest="wait", default="True", choices=["True", "False"], metavar="True|False", help="Wait for Ganeti jobs to complete. [Default: True]"), ) @common.convert_api_faults def handle(self, *args, **options): if args: raise CommandError("Command doesn't accept any arguments") name = options["name"] user_id = options["user_id"] network_id = options["network_id"] server_id = options["server_id"] #router_id = options["router_id"] router_id = None # assume giving security groups comma separated security_group_ids = options["security-groups"] wait = parse_bool(options["wait"]) if not name: name = "" if not network_id: raise CommandError("Please specify a 'network'") vm = None owner = None if server_id: owner = "vm" vm = common.get_resource("server", server_id, for_update=True) #if vm.router: # raise CommandError("Server '%s' does not exist." 
% server_id) elif router_id: owner = "router" vm = common.get_resource("server", router_id, for_update=True) if not vm.router: raise CommandError("Router '%s' does not exist." % router_id) if user_id is None: if vm is not None: user_id = vm.userid else: raise CommandError("Please specify the owner of the port.") # get the network network = common.get_resource("network", network_id) # Get either floating IP or fixed ip address ipaddress = None floating_ip_id = options["floating_ip_id"] ipv4_address = options["ipv4_address"] if floating_ip_id: ipaddress = common.get_resource("floating-ip", floating_ip_id, for_update=True) if ipv4_address is not None and ipaddress.address != ipv4_address: raise CommandError("Floating IP address '%s' is different from" " specified address '%s'" % (ipaddress.address, ipv4_address)) # validate security groups sg_list = [] if security_group_ids: security_group_ids = security_group_ids.split(",") for gid in security_group_ids: sg = util.get_security_group(int(gid)) sg_list.append(sg) new_port = servers.create_port(user_id, network, machine=vm, name=name, use_ipaddress=ipaddress, address=ipv4_address, security_groups=sg_list, device_owner=owner) self.stdout.write("Created port '%s' in DB:\n" % new_port) pprint.pprint_port(new_port, stdout=self.stdout) pprint.pprint_port_ips(new_port, stdout=self.stdout) self.stdout.write("\n") if vm is not None: common.wait_server_task(new_port.machine, wait, stdout=self.stdout)
gpl-3.0
double12gzh/nova
nova/api/openstack/compute/contrib/flavor_rxtx.py
79
2175
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Flavor Rxtx API extension.""" from nova.api.openstack import extensions from nova.api.openstack import wsgi authorize = extensions.soft_extension_authorizer('compute', 'flavor_rxtx') class FlavorRxtxController(wsgi.Controller): def _extend_flavors(self, req, flavors): for flavor in flavors: db_flavor = req.get_db_flavor(flavor['id']) key = 'rxtx_factor' flavor[key] = db_flavor['rxtx_factor'] or "" def _show(self, req, resp_obj): if not authorize(req.environ['nova.context']): return if 'flavor' in resp_obj.obj: self._extend_flavors(req, [resp_obj.obj['flavor']]) @wsgi.extends def show(self, req, resp_obj, id): return self._show(req, resp_obj) @wsgi.extends(action='create') def create(self, req, resp_obj, body): return self._show(req, resp_obj) @wsgi.extends def detail(self, req, resp_obj): if not authorize(req.environ['nova.context']): return self._extend_flavors(req, list(resp_obj.obj['flavors'])) class Flavor_rxtx(extensions.ExtensionDescriptor): """Support to show the rxtx status of a flavor.""" name = "FlavorRxtx" alias = "os-flavor-rxtx" namespace = ("http://docs.openstack.org/compute/ext/" "flavor_rxtx/api/v1.1") updated = "2012-08-29T00:00:00Z" def get_controller_extensions(self): controller = FlavorRxtxController() extension = extensions.ControllerExtension(self, 'flavors', controller) return [extension]
apache-2.0
hyunchel/webargs
tests/test_tornadoparser.py
1
18291
# -*- coding: utf-8 -*- import json try: from urllib import urlencode # python2 except ImportError: from urllib.parse import urlencode # python3 import mock import pytest import tornado.web import tornado.httputil import tornado.httpserver import tornado.http1connection import tornado.concurrent import tornado.ioloop from tornado.testing import AsyncHTTPTestCase from webargs import fields, missing, ValidationError from webargs.tornadoparser import parser, use_args, use_kwargs, parse_json, get_value name = 'name' bvalue = b'value' value = 'value' def test_get_value_basic(): assert get_value({'foo': 42}, 'foo', False) == 42 assert get_value({'foo': 42}, 'bar', False) is missing assert get_value({'foos': ['a', 'b']}, 'foos', True) == ['a', 'b'] # https://github.com/sloria/webargs/pull/30 assert get_value({'foos': ['a', 'b']}, 'bar', True) is missing class TestQueryArgs(object): def setup_method(self, method): parser.clear_cache() def test_it_should_get_single_values(self): query = [(name, value)] field = fields.Field() request = make_get_request(query) result = parser.parse_querystring(request, name, field) assert result == bvalue def test_it_should_get_multiple_values(self): query = [(name, value), (name, value)] field = fields.List(fields.Field()) request = make_get_request(query) result = parser.parse_querystring(request, name, field) assert result == [bvalue, bvalue] def test_it_should_return_missing_if_not_present(self): query = [] field = fields.Field() field2 = fields.List(fields.Int()) request = make_get_request(query) result = parser.parse_querystring(request, name, field) result2 = parser.parse_querystring(request, name, field2) assert result is missing assert result2 is missing def test_it_should_return_empty_list_if_multiple_and_not_present(self): query = [] field = fields.List(fields.Field()) request = make_get_request(query) result = parser.parse_querystring(request, name, field) assert result is missing class TestFormArgs(object): def setup_method(self, 
method): parser.clear_cache() def test_it_should_get_single_values(self): query = [(name, value)] field = fields.Field() request = make_form_request(query) result = parser.parse_form(request, name, field) assert result == bvalue def test_it_should_get_multiple_values(self): query = [(name, value), (name, value)] field = fields.List(fields.Field()) request = make_form_request(query) result = parser.parse_form(request, name, field) assert result == [bvalue, bvalue] def test_it_should_return_missing_if_not_present(self): query = [] field = fields.Field() request = make_form_request(query) result = parser.parse_form(request, name, field) assert result is missing def test_it_should_return_empty_list_if_multiple_and_not_present(self): query = [] field = fields.List(fields.Field()) request = make_form_request(query) result = parser.parse_form(request, name, field) assert result is missing class TestJSONArgs(object): def setup_method(self, method): parser.clear_cache() def test_it_should_get_single_values(self): query = {name: value} field = fields.Field() request = make_json_request(query) result = parser.parse_json(request, name, field) assert result == value def test_it_should_get_multiple_values(self): query = {name: [value, value]} field = fields.List(fields.Field()) request = make_json_request(query) result = parser.parse_json(request, name, field) assert result == [value, value] def test_it_should_get_multiple_nested_values(self): query = {name: [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}]} field = fields.List(fields.Nested({'id': fields.Field(), 'name': fields.Field()})) request = make_json_request(query) result = parser.parse_json(request, name, field) assert result == [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}] def test_it_should_return_missing_if_not_present(self): query = {} field = fields.Field() request = make_json_request(query) result = parser.parse_json(request, name, field) assert result is missing def 
test_it_should_return_empty_list_if_multiple_and_not_present(self): query = {} field = fields.List(fields.Field()) request = make_json_request(query) result = parser.parse_json(request, name, field) assert result is missing def test_it_should_handle_type_error_on_parse_json(self): field = fields.Field() request = make_request( body=tornado.concurrent.Future, headers={'Content-Type': 'application/json'}, ) result = parser.parse_json(request, name, field) assert parser._cache['json'] == {} assert result is missing def test_it_should_handle_value_error_on_parse_json(self): field = fields.Field() request = make_request('this is json not') result = parser.parse_json(request, name, field) assert parser._cache['json'] == {} assert result is missing class TestHeadersArgs(object): def setup_method(self, method): parser.clear_cache() def test_it_should_get_single_values(self): query = {name: value} field = fields.Field() request = make_request(headers=query) result = parser.parse_headers(request, name, field) assert result == value def test_it_should_get_multiple_values(self): query = {name: [value, value]} field = fields.List(fields.Field()) request = make_request(headers=query) result = parser.parse_headers(request, name, field) assert result == [value, value] def test_it_should_return_missing_if_not_present(self): field = fields.Field(multiple=False) request = make_request() result = parser.parse_headers(request, name, field) assert result is missing def test_it_should_return_empty_list_if_multiple_and_not_present(self): query = {} field = fields.List(fields.Field()) request = make_request(headers=query) result = parser.parse_headers(request, name, field) assert result is missing class TestFilesArgs(object): def setup_method(self, method): parser.clear_cache() def test_it_should_get_single_values(self): query = [(name, value)] field = fields.Field() request = make_files_request(query) result = parser.parse_files(request, name, field) assert result == value def 
test_it_should_get_multiple_values(self): query = [(name, value), (name, value)] field = fields.List(fields.Field()) request = make_files_request(query) result = parser.parse_files(request, name, field) assert result == [value, value] def test_it_should_return_missing_if_not_present(self): query = [] field = fields.Field() request = make_files_request(query) result = parser.parse_files(request, name, field) assert result is missing def test_it_should_return_empty_list_if_multiple_and_not_present(self): query = [] field = fields.List(fields.Field()) request = make_files_request(query) result = parser.parse_files(request, name, field) assert result is missing class TestErrorHandler(object): def test_it_should_raise_httperror_on_failed_validation(self): args = {'foo': fields.Field(validate=lambda x: False)} with pytest.raises(tornado.web.HTTPError): parser.parse(args, make_json_request({'foo': 42})) class TestParse(object): def setup_method(self, method): parser.clear_cache() def test_it_should_parse_query_arguments(self): attrs = { 'string': fields.Field(), 'integer': fields.List(fields.Int()) } request = make_get_request([ ('string', 'value'), ('integer', '1'), ('integer', '2') ]) parsed = parser.parse(attrs, request) assert parsed['integer'] == [1, 2] assert parsed['string'] == bvalue def test_parsing_clears_cache(self): request = make_json_request({ 'string': 'value', 'integer': [1, 2] }) string_result = parser.parse_json(request, 'string', fields.Str()) assert string_result == 'value' assert 'json' in parser._cache assert 'string' in parser._cache['json'] assert 'integer' in parser._cache['json'] attrs = {'string': fields.Str(), 'integer': fields.List(fields.Int())} parser.parse(attrs, request) assert parser._cache == {} def test_it_should_parse_form_arguments(self): attrs = { 'string': fields.Field(), 'integer': fields.List(fields.Int()), } request = make_form_request([ ('string', 'value'), ('integer', '1'), ('integer', '2') ]) parsed = parser.parse(attrs, 
request) assert parsed['integer'] == [1, 2] assert parsed['string'] == bvalue def test_it_should_parse_json_arguments(self): attrs = { 'string': fields.Str(), 'integer': fields.List(fields.Int()), } request = make_json_request({ 'string': 'value', 'integer': [1, 2] }) parsed = parser.parse(attrs, request) assert parsed['integer'] == [1, 2] assert parsed['string'] == value def test_it_should_parse_header_arguments(self): attrs = { 'string': fields.Str(), 'integer': fields.List(fields.Int()), } request = make_request(headers={ 'string': 'value', 'integer': ['1', '2'] }) parsed = parser.parse(attrs, request, locations=['headers']) assert parsed['string'] == value assert parsed['integer'] == [1, 2] def test_it_should_parse_cookies_arguments(self): attrs = { 'string': fields.Str(), 'integer': fields.List(fields.Int()), } request = make_cookie_request([ ('string', 'value'), ('integer', '1'), ('integer', '2') ]) parsed = parser.parse(attrs, request, locations=['cookies']) assert parsed['string'] == value assert parsed['integer'] == [2] def test_it_should_parse_files_arguments(self): attrs = { 'string': fields.Str(), 'integer': fields.List(fields.Int()), } request = make_files_request([ ('string', 'value'), ('integer', '1'), ('integer', '2') ]) parsed = parser.parse(attrs, request, locations=['files']) assert parsed['string'] == value assert parsed['integer'] == [1, 2] def test_it_should_parse_required_arguments(self): args = { 'foo': fields.Field(required=True), } request = make_json_request({}) with pytest.raises(tornado.web.HTTPError) as excinfo: parser.parse(args, request) assert 'Missing data for required field.' in str(excinfo) def test_it_should_parse_multiple_arg_required(self): args = { 'foo': fields.List(fields.Int(), required=True) } request = make_json_request({}) with pytest.raises(tornado.web.HTTPError) as excinfo: parser.parse(args, request) assert 'Missing data for required field.' 
in str(excinfo) class TestUseArgs(object): def setup_method(self, method): parser.clear_cache() def test_it_should_pass_parsed_as_first_argument(self): class Handler(object): request = make_json_request({'key': 'value'}) @use_args({'key': fields.Field()}) def get(self, *args, **kwargs): assert args[0] == {'key': 'value'} assert kwargs == {} return True handler = Handler() result = handler.get() assert result is True def test_it_should_pass_parsed_as_kwargs_arguments(self): class Handler(object): request = make_json_request({'key': 'value'}) @use_kwargs({'key': fields.Field()}) def get(self, *args, **kwargs): assert args == () assert kwargs == {'key': 'value'} return True handler = Handler() result = handler.get() assert result is True def test_it_should_be_validate_arguments_when_validator_is_passed(self): class Handler(object): request = make_json_request({'foo': 41}) @use_kwargs({'foo': fields.Int()}, validate=lambda args: args['foo'] > 42) def get(self, args): return True handler = Handler() with pytest.raises(tornado.web.HTTPError): handler.get() def make_uri(args): return '/test?' 
+ urlencode(args) def make_form_body(args): return urlencode(args) def make_json_body(args): return json.dumps(args) def make_get_request(args): return make_request(uri=make_uri(args)) def make_form_request(args): return make_request( body=make_form_body(args), headers={ 'Content-Type': 'application/x-www-form-urlencoded' } ) def make_json_request(args): return make_request( body=make_json_body(args), headers={ 'Content-Type': 'application/json; charset=UTF-8' } ) def make_cookie_request(args): return make_request( headers={ 'Cookie': ' ;'.join('='.join(pair) for pair in args) } ) def make_files_request(args): files = {} for key, value in args: if isinstance(value, list): files.setdefault(key, []).extend(value) else: files.setdefault(key, []).append(value) return make_request(files=files) def make_request(uri=None, body=None, headers=None, files=None): uri = uri if uri is not None else u'' body = body if body is not None else u'' method = 'POST' if body else 'GET' # Need to make a mock connection right now because Tornado 4.0 requires a # remote_ip in the context attribute. 
4.1 addresses this, and this # will be unnecessary once it is released # https://github.com/tornadoweb/tornado/issues/1118 mock_connection = mock.Mock(spec=tornado.http1connection.HTTP1Connection) mock_connection.context = mock.Mock() mock_connection.remote_ip = None content_type = headers.get('Content-Type', u'') if headers else u'' request = tornado.httputil.HTTPServerRequest( method=method, uri=uri, body=body, headers=headers, files=files, connection=mock_connection ) tornado.httputil.parse_body_arguments( content_type=content_type, body=body.encode('latin-1') if hasattr(body, 'encode') else body, arguments=request.body_arguments, files=request.files ) return request class EchoHandler(tornado.web.RequestHandler): ARGS = { 'name': fields.Str(), } @use_args(ARGS) def get(self, args): self.write(args) @use_args(ARGS) def post(self, args): self.write(args) echo_app = tornado.web.Application([ (r'/echo', EchoHandler) ]) class TestApp(AsyncHTTPTestCase): def get_app(self): return echo_app def test_post(self): res = self.fetch('/echo', method='POST', headers={'Content-Type': 'application/json'}, body=json.dumps({'name': 'Steve'})) json_body = parse_json(res.body) assert json_body['name'] == 'Steve' res = self.fetch('/echo', method='POST', headers={'Content-Type': 'application/json'}, body=json.dumps({})) json_body = parse_json(res.body) assert 'name' not in json_body def test_get_with_no_json_body(self): res = self.fetch('/echo', method='GET', headers={'Content-Type': 'application/json'}) json_body = parse_json(res.body) assert 'name' not in json_body class ValidateHandler(tornado.web.RequestHandler): ARGS = { 'name': fields.Str(required=True), } @use_args(ARGS) def post(self, args): self.write(args) @use_kwargs(ARGS) def get(self, name): self.write({'status': 'success'}) def always_fail(val): raise ValidationError('something went wrong') class AlwaysFailHandler(tornado.web.RequestHandler): ARGS = { 'name': fields.Str(validate=always_fail) } @use_args(ARGS) def 
post(self, args): self.write(args) validate_app = tornado.web.Application([ (r'/echo', ValidateHandler), (r'/alwaysfail', AlwaysFailHandler), ]) class TestValidateApp(AsyncHTTPTestCase): def get_app(self): return validate_app def test_required_field_provided(self): res = self.fetch( '/echo', method='POST', headers={'Content-Type': 'application/json'}, body=json.dumps({'name': 'johnny'}), ) json_body = parse_json(res.body) assert json_body['name'] == 'johnny' def test_missing_required_field_throws_422(self): res = self.fetch( '/echo', method='POST', headers={'Content-Type': 'application/json'}, body=json.dumps({'occupation': 'pizza'}), ) assert res.code == 422 def test_use_kwargs_with_error(self): res = self.fetch( '/echo', method='GET', ) assert res.code == 422 if __name__ == '__main__': echo_app.listen(8888) tornado.ioloop.IOLoop.instance().start()
mit
xzturn/tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_test.py
9
14495
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg as linalg_lib from tensorflow.python.platform import test linalg = linalg_lib rng = np.random.RandomState(123) class LinearOperatorShape(linalg.LinearOperator): """LinearOperator that implements the methods ._shape and _shape_tensor.""" def __init__(self, shape, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None): self._stored_shape = shape super(LinearOperatorShape, self).__init__( dtype=dtypes.float32, graph_parents=None, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square) def _shape(self): return tensor_shape.TensorShape(self._stored_shape) def _shape_tensor(self): return 
constant_op.constant(self._stored_shape, dtype=dtypes.int32) def _matmul(self): raise NotImplementedError("Not needed for this test.") class LinearOperatorMatmulSolve(linalg.LinearOperator): """LinearOperator that wraps a [batch] matrix and implements matmul/solve.""" def __init__(self, matrix, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None): self._matrix = ops.convert_to_tensor(matrix, name="matrix") super(LinearOperatorMatmulSolve, self).__init__( dtype=self._matrix.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square) def _shape(self): return self._matrix.shape def _shape_tensor(self): return array_ops.shape(self._matrix) def _matmul(self, x, adjoint=False, adjoint_arg=False): x = ops.convert_to_tensor(x, name="x") return math_ops.matmul( self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg) def _solve(self, rhs, adjoint=False, adjoint_arg=False): rhs = ops.convert_to_tensor(rhs, name="rhs") assert not adjoint_arg, "Not implemented for this test class." 
return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint) @test_util.run_all_in_graph_and_eager_modes class LinearOperatorTest(test.TestCase): def test_all_shape_properties_defined_by_the_one_property_shape(self): shape = (1, 2, 3, 4) operator = LinearOperatorShape(shape) self.assertAllEqual(shape, operator.shape) self.assertAllEqual(4, operator.tensor_rank) self.assertAllEqual((1, 2), operator.batch_shape) self.assertAllEqual(4, operator.domain_dimension) self.assertAllEqual(3, operator.range_dimension) def test_all_shape_methods_defined_by_the_one_method_shape(self): with self.cached_session(): shape = (1, 2, 3, 4) operator = LinearOperatorShape(shape) self.assertAllEqual(shape, self.evaluate(operator.shape_tensor())) self.assertAllEqual(4, self.evaluate(operator.tensor_rank_tensor())) self.assertAllEqual((1, 2), self.evaluate(operator.batch_shape_tensor())) self.assertAllEqual(4, self.evaluate(operator.domain_dimension_tensor())) self.assertAllEqual(3, self.evaluate(operator.range_dimension_tensor())) def test_is_x_properties(self): operator = LinearOperatorShape( shape=(2, 2), is_non_singular=False, is_self_adjoint=True, is_positive_definite=False) self.assertFalse(operator.is_non_singular) self.assertTrue(operator.is_self_adjoint) self.assertFalse(operator.is_positive_definite) def test_generic_to_dense_method_non_square_matrix_static(self): matrix = rng.randn(2, 3, 4) operator = LinearOperatorMatmulSolve(matrix) with self.cached_session(): operator_dense = operator.to_dense() self.assertAllEqual((2, 3, 4), operator_dense.shape) self.assertAllClose(matrix, self.evaluate(operator_dense)) def test_generic_to_dense_method_non_square_matrix_tensor(self): matrix = rng.randn(2, 3, 4) matrix_ph = array_ops.placeholder_with_default(input=matrix, shape=None) operator = LinearOperatorMatmulSolve(matrix_ph) operator_dense = operator.to_dense() self.assertAllClose(matrix, self.evaluate(operator_dense)) def test_matvec(self): matrix = [[1., 0], [0., 2.]] operator 
= LinearOperatorMatmulSolve(matrix) x = [1., 1.] with self.cached_session(): y = operator.matvec(x) self.assertAllEqual((2,), y.shape) self.assertAllClose([1., 2.], self.evaluate(y)) def test_solvevec(self): matrix = [[1., 0], [0., 2.]] operator = LinearOperatorMatmulSolve(matrix) y = [1., 1.] with self.cached_session(): x = operator.solvevec(y) self.assertAllEqual((2,), x.shape) self.assertAllClose([1., 1 / 2.], self.evaluate(x)) def test_is_square_set_to_true_for_square_static_shapes(self): operator = LinearOperatorShape(shape=(2, 4, 4)) self.assertTrue(operator.is_square) def test_is_square_set_to_false_for_square_static_shapes(self): operator = LinearOperatorShape(shape=(2, 3, 4)) self.assertFalse(operator.is_square) def test_is_square_set_incorrectly_to_false_raises(self): with self.assertRaisesRegexp(ValueError, "but.*was square"): _ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square def test_is_square_set_inconsistent_with_other_hints_raises(self): with self.assertRaisesRegexp(ValueError, "is always square"): matrix = array_ops.placeholder_with_default(input=(), shape=None) LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False) with self.assertRaisesRegexp(ValueError, "is always square"): matrix = array_ops.placeholder_with_default(input=(), shape=None) LinearOperatorMatmulSolve( matrix, is_positive_definite=True, is_square=False) def test_non_square_operators_raise_on_determinant_and_solve(self): operator = LinearOperatorShape((2, 3)) with self.assertRaisesRegexp(NotImplementedError, "not be square"): operator.determinant() with self.assertRaisesRegexp(NotImplementedError, "not be square"): operator.log_abs_determinant() with self.assertRaisesRegexp(NotImplementedError, "not be square"): operator.solve(rng.rand(2, 2)) with self.assertRaisesRegexp(ValueError, "is always square"): matrix = array_ops.placeholder_with_default(input=(), shape=None) LinearOperatorMatmulSolve( matrix, is_positive_definite=True, is_square=False) def 
test_is_square_manual_set_works(self): matrix = array_ops.placeholder_with_default( input=np.ones((2, 2)), shape=None) operator = LinearOperatorMatmulSolve(matrix) if not context.executing_eagerly(): # Eager mode will read in the default value, and discover the answer is # True. Graph mode must rely on the hint, since the placeholder has # shape=None...the hint is, by default, None. self.assertEqual(None, operator.is_square) # Set to True operator = LinearOperatorMatmulSolve(matrix, is_square=True) self.assertTrue(operator.is_square) def test_linear_operator_matmul_hints_closed(self): matrix = array_ops.placeholder_with_default(input=np.ones((2, 2)), shape=None) operator1 = LinearOperatorMatmulSolve(matrix) operator_matmul = operator1.matmul(operator1) if not context.executing_eagerly(): # Eager mode will read in the input and discover matrix is square. self.assertEqual(None, operator_matmul.is_square) self.assertEqual(None, operator_matmul.is_non_singular) self.assertEqual(None, operator_matmul.is_self_adjoint) self.assertEqual(None, operator_matmul.is_positive_definite) operator2 = LinearOperatorMatmulSolve( matrix, is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True, ) operator_matmul = operator2.matmul(operator2) self.assertTrue(operator_matmul.is_square) self.assertTrue(operator_matmul.is_non_singular) self.assertEqual(None, operator_matmul.is_self_adjoint) self.assertEqual(None, operator_matmul.is_positive_definite) def test_linear_operator_matmul_hints_false(self): matrix1 = array_ops.placeholder_with_default( input=rng.rand(2, 2), shape=None) operator1 = LinearOperatorMatmulSolve( matrix1, is_non_singular=False, is_self_adjoint=False, is_positive_definite=False, is_square=True, ) operator_matmul = operator1.matmul(operator1) self.assertTrue(operator_matmul.is_square) self.assertFalse(operator_matmul.is_non_singular) self.assertEqual(None, operator_matmul.is_self_adjoint) self.assertEqual(None, 
operator_matmul.is_positive_definite) matrix2 = array_ops.placeholder_with_default( input=rng.rand(2, 3), shape=None) operator2 = LinearOperatorMatmulSolve( matrix2, is_non_singular=False, is_self_adjoint=False, is_positive_definite=False, is_square=False, ) operator_matmul = operator2.matmul(operator2, adjoint_arg=True) if context.executing_eagerly(): self.assertTrue(operator_matmul.is_square) # False since we specified is_non_singular=False. self.assertFalse(operator_matmul.is_non_singular) else: self.assertIsNone(operator_matmul.is_square) # May be non-singular, since it's the composition of two non-square. # TODO(b/136162840) This is a bit inconsistent, and should probably be # False since we specified operator2.is_non_singular == False. self.assertIsNone(operator_matmul.is_non_singular) # No way to deduce these, even in Eager mode. self.assertIsNone(operator_matmul.is_self_adjoint) self.assertIsNone(operator_matmul.is_positive_definite) def test_linear_operator_matmul_hint_infer_square(self): matrix1 = array_ops.placeholder_with_default( input=rng.rand(2, 3), shape=(2, 3)) matrix2 = array_ops.placeholder_with_default( input=rng.rand(3, 2), shape=(3, 2)) matrix3 = array_ops.placeholder_with_default( input=rng.rand(3, 4), shape=(3, 4)) operator1 = LinearOperatorMatmulSolve(matrix1, is_square=False) operator2 = LinearOperatorMatmulSolve(matrix2, is_square=False) operator3 = LinearOperatorMatmulSolve(matrix3, is_square=False) self.assertTrue(operator1.matmul(operator2).is_square) self.assertTrue(operator2.matmul(operator1).is_square) self.assertFalse(operator1.matmul(operator3).is_square) def testDispatchedMethods(self): operator = linalg.LinearOperatorFullMatrix( [[1., 0.5], [0.5, 1.]], is_square=True, is_self_adjoint=True, is_non_singular=True, is_positive_definite=True) methods = { "trace": linalg.trace, "diag_part": linalg.diag_part, "log_abs_determinant": linalg.logdet, "determinant": linalg.det } for method in methods: op_val = getattr(operator, method)() 
linalg_val = methods[method](operator) self.assertAllClose( self.evaluate(op_val), self.evaluate(linalg_val)) # Solve and Matmul go here. adjoint = linalg.adjoint(operator) self.assertIsInstance(adjoint, linalg.LinearOperator) cholesky = linalg.cholesky(operator) self.assertIsInstance(cholesky, linalg.LinearOperator) inverse = linalg.inv(operator) self.assertIsInstance(inverse, linalg.LinearOperator) def testDispatchMatmulSolve(self): operator = linalg.LinearOperatorFullMatrix( np.float64([[1., 0.5], [0.5, 1.]]), is_square=True, is_self_adjoint=True, is_non_singular=True, is_positive_definite=True) rhs = np.random.uniform(-1., 1., size=[3, 2, 2]) for adjoint in [False, True]: for adjoint_arg in [False, True]: op_val = operator.matmul( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) matmul_val = math_ops.matmul( operator, rhs, adjoint_a=adjoint, adjoint_b=adjoint_arg) self.assertAllClose( self.evaluate(op_val), self.evaluate(matmul_val)) op_val = operator.solve(rhs, adjoint=adjoint) solve_val = linalg.solve(operator, rhs, adjoint=adjoint) self.assertAllClose( self.evaluate(op_val), self.evaluate(solve_val)) def testDispatchMatmulLeftOperatorIsTensor(self): mat = np.float64([[1., 0.5], [0.5, 1.]]) right_operator = linalg.LinearOperatorFullMatrix( mat, is_square=True, is_self_adjoint=True, is_non_singular=True, is_positive_definite=True) lhs = np.random.uniform(-1., 1., size=[3, 2, 2]) for adjoint in [False, True]: for adjoint_arg in [False, True]: op_val = math_ops.matmul( lhs, mat, adjoint_a=adjoint, adjoint_b=adjoint_arg) matmul_val = math_ops.matmul( lhs, right_operator, adjoint_a=adjoint, adjoint_b=adjoint_arg) self.assertAllClose( self.evaluate(op_val), self.evaluate(matmul_val)) if __name__ == "__main__": test.main()
apache-2.0
KamillaKhabibrakhmanova/fish
node_modules/ionic/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py
2354
10366
# Unmodified from http://code.activestate.com/recipes/576693/ # other than to add MIT license header (as specified on page, but not in code). # Linked from Python documentation here: # http://docs.python.org/2/library/collections.html#collections.OrderedDict # # This should be deleted once Py2.7 is available on all bots, see # http://crbug.com/241769. # # Copyright (c) 2009 Raymond Hettinger. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. 
# The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' 
try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) # Suppress 'OrderedDict.update: Method has no argument': # pylint: disable=E0211 def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
mit
espressopp/espressopp
src/integrator/FixPositions.py
1
2020
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ********************************** espressopp.integrator.FixPositions ********************************** .. function:: espressopp.integrator.FixPositions(system, particleGroup, fixMask) :param system: :param particleGroup: :param fixMask: :type system: :type particleGroup: :type fixMask: """ from espressopp.esutil import cxxinit from espressopp import pmi from espressopp.integrator.Extension import * from _espressopp import integrator_FixPositions class FixPositionsLocal(ExtensionLocal, integrator_FixPositions): def __init__(self, system, particleGroup, fixMask): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, integrator_FixPositions, system, particleGroup, fixMask) if pmi.isController : class FixPositions(Extension, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.integrator.FixPositionsLocal', pmicall = ['setFixMask', 'getFixMask'], pmiproperty = [ 'particleGroup' ] )
gpl-3.0
mhnatiuk/phd_sociology_of_religion
scrapper/lib/python2.7/site-packages/twisted/lore/lmath.py
60
3037
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ LaTeX-defined image support for Lore documents. """ import os, tempfile from xml.dom import minidom as dom from twisted.web import domhelpers import latex, tree, lint, default class MathLatexSpitter(latex.LatexSpitter): start_html = '\\documentclass{amsart}\n' def visitNode_div_latexmacros(self, node): self.writer(domhelpers.getNodeText(node)) def visitNode_span_latexformula(self, node): self.writer('\[') self.writer(domhelpers.getNodeText(node)) self.writer('\]') def formulaeToImages(document, dir, _system=os.system): # gather all macros macros = '' for node in domhelpers.findElementsWithAttribute(document, 'class', 'latexmacros'): macros += domhelpers.getNodeText(node) node.parentNode.removeChild(node) i = 0 for node in domhelpers.findElementsWithAttribute(document, 'class', 'latexformula'): latexText='''\\documentclass[12pt]{amsart}%s \\begin{document}\[%s\] \\end{document}''' % (macros, domhelpers.getNodeText(node)) # This file really should be cleaned up by this function, or placed # somewhere such that the calling code can find it and clean it up. 
file = tempfile.mktemp() f = open(file+'.tex', 'w') f.write(latexText) f.close() _system('latex %s.tex' % file) _system('dvips %s.dvi -o %s.ps' % (os.path.basename(file), file)) baseimgname = 'latexformula%d.png' % i imgname = os.path.join(dir, baseimgname) i += 1 _system('pstoimg -type png -crop a -trans -interlace -out ' '%s %s.ps' % (imgname, file)) newNode = dom.parseString( '<span><br /><img src="%s" /><br /></span>' % ( baseimgname,)).documentElement node.parentNode.replaceChild(newNode, node) def doFile(fn, docsdir, ext, url, templ, linkrel='', d=None): d = d or {} doc = tree.parseFileAndReport(fn) formulaeToImages(doc, os.path.dirname(fn)) cn = templ.cloneNode(1) tree.munge(doc, cn, linkrel, docsdir, fn, ext, url, d) cn.writexml(open(os.path.splitext(fn)[0]+ext, 'wb')) class ProcessingFunctionFactory(default.ProcessingFunctionFactory): latexSpitters = {None: MathLatexSpitter} def getDoFile(self): return doFile def getLintChecker(self): checker = lint.getDefaultChecker() checker.allowedClasses = checker.allowedClasses.copy() oldDiv = checker.allowedClasses['div'] oldSpan = checker.allowedClasses['span'] checker.allowedClasses['div'] = lambda x:oldDiv(x) or x=='latexmacros' checker.allowedClasses['span'] = (lambda x:oldSpan(x) or x=='latexformula') return checker factory = ProcessingFunctionFactory()
gpl-2.0
pluskid/mxnet
python/mxnet/executor.py
14
19510
# coding: utf-8 # pylint: disable=invalid-name, protected-access, too-many-locals, too-many-arguments """Symbolic Executor component of MXNet.""" from __future__ import absolute_import import ctypes import copy import warnings import numpy as np from .base import _LIB from .base import mx_uint, NDArrayHandle, ExecutorHandle from .base import check_call, c_array, py_str from .ndarray import NDArray from . import ndarray as nd # those functions are not used here, we just import them to keep backward compatibility # in case the end user calls them, as they originally lives here # pylint: disable=unused-import from .executor_manager import _split_input_slice, _check_arguments, _load_data, _load_label def _monitor_callback_wrapper(callback): """A wrapper for the user-defined handle.""" def callback_handle(name, array, _): """ ctypes function """ callback(name, array) return callback_handle class Executor(object): """Executor is the object providing efficient symbolic graph execution and optimization. Examples -------- >>> # typical approach to create an executor is to bind symbol >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}) """ def __init__(self, handle, symbol, ctx, grad_req, group2ctx): """Constructor, used Symbol.bind and Symbol.simple_bind instead. Parameters ---------- handle: ExecutorHandle ExecutorHandle generated by calling `bind`. See Also -------- Symbol.bind : to create executor. 
""" if not isinstance(handle, ExecutorHandle): raise TypeError("Handle type error") self.handle = handle self.arg_arrays = [] self.grad_arrays = [] self.aux_arrays = [] self.outputs = self._get_outputs() self._symbol = copy.deepcopy(symbol) self._arg_dict = None self._grad_dict = None self._aux_dict = None self._output_dict = None self._monitor_callback = None self._output_dirty = False self._ctx = copy.deepcopy(ctx) self._grad_req = copy.deepcopy(grad_req) self._group2ctx = copy.deepcopy(group2ctx) def __del__(self): check_call(_LIB.MXExecutorFree(self.handle)) @staticmethod def _get_dict(names, ndarrays): """Get the dictionary given name and ndarray pairs.""" nset = set() for nm in names: if nm in nset: raise ValueError('Duplicate names detected, %s' % str(names)) nset.add(nm) return dict(zip(names, ndarrays)) def _get_outputs(self): """List all the output NDArray. Returns ------- A list of ndarray bound to the heads of executor. """ out_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() check_call(_LIB.MXExecutorOutputs(self.handle, ctypes.byref(out_size), ctypes.byref(handles))) return [NDArray(NDArrayHandle(handles[i])) for i in range(out_size.value)] def forward(self, is_train=False, **kwargs): """Calculate the outputs specified by the bound symbol. Parameters ---------- is_train: bool, optional Whether this forward is for evaluation purpose. If True, a backward call is expected to follow. Otherwise following backward is invalid. **kwargs Additional specification of input arguments. 
Examples -------- >>> # doing forward by specifying data >>> texec.forward(is_train=True, data=mydata) >>> # doing forward by not specifying things, but copy to the executor before hand >>> mydata.copyto(texec.arg_dict['data']) >>> texec.forward(is_train=True) >>> # doing forward by specifying data and get outputs >>> outputs = texec.forward(is_train=True, data=mydata) >>> print(outputs[0].asnumpy()) """ if len(kwargs) != 0: arg_dict = self.arg_dict for name, array in kwargs.items(): if not isinstance(array, (NDArray, np.ndarray)): raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray') if name not in arg_dict: raise TypeError('Unknown argument %s' % name) if arg_dict[name].shape != array.shape: raise ValueError('Shape not match! Argument %s, need: %s, received: %s' %(name, str(arg_dict[name].shape), str(array.shape))) arg_dict[name][:] = array check_call(_LIB.MXExecutorForward( self.handle, ctypes.c_int(int(is_train)))) if self._output_dirty: warnings.warn( "Calling forward the second time after forward(is_train=True) " "without calling backward first. Is this intended?", stacklevel=2) self._output_dirty = is_train return self.outputs def backward(self, out_grads=None): """Do backward pass to get the gradient of arguments. Parameters ---------- out_grads : NDArray or list of NDArray or dict of str to NDArray, optional Gradient on the outputs to be propagated back. This parameter is only needed when bind is called on outputs that are not a loss function. Examples -------- >>> # Example for binding on loss function symbol, which gives the loss value of the model. >>> # Equivalently it gives the head gradient for backward pass. >>> # In this example the built-in SoftmaxOutput is used as loss function. >>> # MakeLoss can be used to define customized loss function symbol. 
>>> net = mx.sym.Variable('data') >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6) >>> net = mx.sym.Activation(net, name='relu', act_type="relu") >>> net = mx.sym.SoftmaxOutput(net, name='softmax') >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)), >>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))} >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))} >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad) >>> out = texec.forward(is_train=True)[0].copy() >>> print out.asnumpy() [[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]] >>> texec.backward() >>> print(texec.grad_arrays[1].asnumpy()) [[ 0.00378404 0.00378404 0.00378404 0.00378404] [-0.92399555 -0.92399555 -0.92399555 -0.92399555] [ 0.07600445 0.07600445 0.07600445 0.07600445] [ 0.07600445 0.07600445 0.07600445 0.07600445] [ 0.20660152 0.20660152 0.20660152 0.20660152] [ 0.5616011 0.5616011 0.5616011 0.5616011 ]] >>> >>> # Example for binding on non-loss function symbol. >>> # Here the binding symbol is neither built-in loss function >>> # nor customized loss created by MakeLoss. >>> # As a result the head gradient is not automatically provided. >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> # c is not a loss function symbol >>> c = 2 * a + b >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])} >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))} >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad) >>> out = texec.forward(is_train=True)[0].copy() >>> print(out.asnumpy()) [ 4. 7.] >>> # out_grads is the head gradient in backward pass. >>> # Here we define 'c' as loss function. >>> # Then 'out' is passed as head gradient of backward pass. >>> texec.backward(out) >>> print(texec.grad_arrays[0].asnumpy()) [ 8. 14.] >>> print(texec.grad_arrays[1].asnumpy()) [ 4. 7.] 
""" if out_grads is None: out_grads = [] elif isinstance(out_grads, NDArray): out_grads = [out_grads] elif isinstance(out_grads, dict): out_grads = [out_grads[k] for k in self._symbol.list_outputs()] for obj in out_grads: if not isinstance(obj, NDArray): raise TypeError("inputs must be NDArray") ndarray = c_array(NDArrayHandle, [item.handle for item in out_grads]) check_call(_LIB.MXExecutorBackward( self.handle, mx_uint(len(out_grads)), ndarray)) if not self._output_dirty: warnings.warn( "Calling backward without calling forward(is_train=True) " "first. Behavior is undefined.", stacklevel=2) self._output_dirty = False def set_monitor_callback(self, callback): """Install callback for monitor. Parameters ---------- callback : function Takes a string and an NDArrayHandle. Examples -------- >>> def mon_callback(*args, **kwargs): >>> print("Do your stuff here.") >>> >>> texe.set_monitor_callback(mon_callback) """ cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p) self._monitor_callback = cb_type(_monitor_callback_wrapper(callback)) check_call(_LIB.MXExecutorSetMonitorCallback( self.handle, self._monitor_callback, None)) @property def arg_dict(self): """Get dictionary representation of argument arrrays. Returns ------- arg_dict : dict of str to NDArray The dictionary that maps the names of arguments to NDArrays. Raises ------ ValueError : if there are duplicated names in the arguments. """ if self._arg_dict is None: self._arg_dict = Executor._get_dict( self._symbol.list_arguments(), self.arg_arrays) return self._arg_dict @property def grad_dict(self): """Get dictionary representation of gradient arrays. Returns ------- grad_dict : dict of str to NDArray The dictionary that maps name of arguments to gradient arrays. 
""" if self._grad_dict is None: self._grad_dict = Executor._get_dict( self._symbol.list_arguments(), self.grad_arrays) return self._grad_dict @property def aux_dict(self): """Get dictionary representation of auxiliary states arrays. Returns ------- aux_dict : dict of str to NDArray The dictionary that maps name of auxiliary states to NDArrays. Raises ------ ValueError : if there are duplicated names in the auxiliary states. """ if self._aux_dict is None: self._aux_dict = Executor._get_dict( self._symbol.list_auxiliary_states(), self.aux_arrays) return self._aux_dict @property def output_dict(self): """Get dictionary representation of output arrays. Returns ------- output_dict : dict of str to NDArray The dictionary that maps name of output names to NDArrays. Raises ------ ValueError : if there are duplicated names in the outputs. """ if self._output_dict is None: self._output_dict = Executor._get_dict( self._symbol.list_outputs(), self.outputs) return self._output_dict def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False): """Copy parameters from arg_params, aux_params into executor's internal array. Parameters ---------- arg_params : dict of str to NDArray Parameters, dict of name to NDArray of arguments. aux_params : dict of str to NDArray, optional Parameters, dict of name to NDArray of auxiliary states. allow_extra_params : boolean, optional Whether allow extra parameters that are not needed by symbol. If this is True, no error will be thrown when arg_params or aux_params contain extra parameters that is not needed by the executor. Raises ------ ValueError If there is additional parameters in the dict but ``allow_extra_params=False``. 
Examples -------- >>> # set parameters with existing model checkpoint >>> model_prefix = 'mx_mlp' >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0) >>> texec.copy_params_from(arg_params, aux_params) """ for name, array in arg_params.items(): if name in self.arg_dict: dst = self.arg_dict[name] array.astype(dst.dtype).copyto(dst) elif not allow_extra_params: raise ValueError('Find name \"%s\" that is not in the arguments' % name) if aux_params is None: return for name, array in aux_params.items(): if name in self.aux_dict: dst = self.aux_dict[name] array.astype(dst.dtype).copyto(dst) elif not allow_extra_params: raise ValueError('Find name %s that is not in the auxiliary states' % name) def reshape(self, partial_shaping=False, allow_up_sizing=False, **kwargs): """Return a new executor with the same symbol and shared memory, but different input/output shapes. For runtime reshaping, variable length sequences, etc. The returned executor shares state with the current one, and cannot be used in parallel with it. Parameters ---------- partial_shaping : bool Whether to allow changing the shape of unspecified arguments. allow_up_sizing : bool Whether to allow allocating new ndarrays that's larger than the original. kwargs : dict of string to tuple of int New shape for arguments. Returns ------- exec : Executor A new executor that shares memory with self. 
Examples -------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.zeros((2, 1)), 'b': mx.nd.ones((2,1))}) >>> new_shape = {'a': (4, 2), 'b': (4, 2)} >>> texec.reshape(allow_up_sizing=True, **new_shape) """ # pylint: disable=too-many-branches arg_shapes, _, aux_shapes = self._symbol.infer_shape(**kwargs) if arg_shapes is None: raise ValueError("Insufficient argument shapes provided.") new_arg_dict = {} new_grad_dict = {} for i, name in enumerate(self._symbol.list_arguments()): new_shape = arg_shapes[i] arr = self.arg_arrays[i] darr = None if self.grad_arrays is None else self.grad_arrays[i] if partial_shaping or name in kwargs or new_shape == arr.shape: if np.prod(new_shape) > np.prod(arr.shape): assert allow_up_sizing, "New shape of arg:%s larger than original. "%name + \ "First making a big executor and then down sizing it " + \ "is more efficient than the reverse." + \ "If you really want to up size, set allow_up_sizing=True " + \ "to enable allocation of new arrays." new_arg_dict[name] = nd.empty(new_shape, ctx=arr.context, dtype=arr.dtype) if darr is not None: new_grad_dict[name] = nd.empty(new_shape, ctx=darr.context, dtype=arr.dtype) else: new_arg_dict[name] = arr.reshape(new_shape) if darr is not None: new_grad_dict[name] = darr.reshape(new_shape) else: raise AssertionError("Shape of unspecified array arg:%s changed. "%name + \ "This can cause the new executor to not share parameters " + \ "with the old one. Please check for error in network." +\ "If this is intended, set partial_shaping=True to suppress this warning.") new_aux_dict = {} for name, new_shape, arr in zip(self._symbol.list_auxiliary_states(), aux_shapes, self.aux_arrays): if partial_shaping or new_shape == arr.shape: if np.prod(new_shape) > np.prod(arr.shape): assert allow_up_sizing, "New shape of arg:%s larger than original. 
"%name + \ "First making a big executor and then down sizing it " + \ "is more efficient than the reverse." + \ "If you really want to up size, set allow_up_sizing=True " + \ "to enable allocation of new arrays." new_aux_dict[name] = nd.empty(new_shape, ctx=arr.context, dtype=arr.dtype) else: new_aux_dict[name] = arr.reshape(new_shape) else: raise AssertionError("Shape of unspecified array aux:%s changed. "%name + \ "This can cause the new executor to not share parameters " + \ "with the old one. Please check for error in network." +\ "If this is intended, set partial_shaping=True to suppress this warning.") return self._symbol.bind(self._ctx, args=new_arg_dict, args_grad=new_grad_dict, grad_req=self._grad_req, aux_states=new_aux_dict, group2ctx=self._group2ctx, shared_exec=self) def debug_str(self): """Get a debug string about internal execution plan. Returns ------- debug_str : string Debug string of the executor. Examples -------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.sin(a) >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}) >>> print(texec.debug_str()) Symbol Outputs: output[0]=_plus0(0) Variable:a -------------------- Op:_mul_scalar, Name=_mulscalar0 Inputs: arg[0]=a(0) version=0 Attrs: scalar=2 -------------------- Op:sin, Name=sin0 Inputs: arg[0]=a(0) version=0 -------------------- Op:elemwise_add, Name=_plus0 Inputs: arg[0]=_mulscalar0(0) arg[1]=sin0(0) Total 0 MB allocated Total 11 TempSpace resource requested """ debug_str = ctypes.c_char_p() check_call(_LIB.MXExecutorPrint( self.handle, ctypes.byref(debug_str))) return py_str(debug_str.value)
apache-2.0
dhruvsrivastava/OJ
flask/lib/python2.7/site-packages/sqlalchemy/testing/exclusions.py
36
12495
# testing/exclusions.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Predicate-based test exclusion rules.

Provides ``skip_if`` / ``fails_if`` style decorators that conditionally
skip tests, or mark them as expected failures, based on the database
configuration currently in use by the test suite.
"""

import operator
from ..util import decorator
from . import config
from .. import util
import inspect
import contextlib

# inspect.getargspec was deprecated in Python 3.0 and removed in 3.11;
# prefer getfullargspec when available while remaining Python 2 compatible.
try:
    _getargspec = inspect.getfullargspec
except AttributeError:
    _getargspec = inspect.getargspec


def skip_if(predicate, reason=None):
    """Return a :class:`compound` that skips the test when *predicate* holds."""
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.skips.add(pred)
    return rule


def fails_if(predicate, reason=None):
    """Return a :class:`compound` marking the test as an expected failure
    when *predicate* holds."""
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.fails.add(pred)
    return rule


class compound(object):
    """A combinable collection of skip / expected-failure predicates plus
    inclusion tags.  Instances are used as test decorators via ``__call__``."""

    def __init__(self):
        self.fails = set()
        self.skips = set()
        self.tags = set()

    def __add__(self, other):
        return self.add(other)

    def add(self, *others):
        """Return a new compound merging this rule with *others*."""
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        copy.tags.update(self.tags)
        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
            copy.tags.update(other.tags)
        return copy

    def not_(self):
        """Return a new compound with every predicate logically negated."""
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        copy.tags.update(self.tags)
        return copy

    @property
    def enabled(self):
        return self.enabled_for_config(config._current)

    def enabled_for_config(self, config):
        """True if no skip/fail predicate matches the given config."""
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True

    def matching_config_reasons(self, config):
        """Human-readable strings for every predicate matching *config*."""
        return [
            predicate._as_string(config)
            for predicate in self.skips.union(self.fails)
            if predicate(config)
        ]

    def include_test(self, include_tags, exclude_tags):
        """True if this rule's tags pass the include/exclude tag filters."""
        return bool(
            not self.tags.intersection(exclude_tags)
            and (not include_tags
                 or self.tags.intersection(include_tags))
        )

    def _extend(self, other):
        # Fold another compound's rules into this one in place.
        self.skips.update(other.skips)
        self.fails.update(other.fails)
        self.tags.update(other.tags)

    def __call__(self, fn):
        """Decorate *fn* so the skip / expected-failure logic runs around it."""
        if hasattr(fn, '_sa_exclusion_extend'):
            # fn was already decorated by another compound; merge into it
            # rather than wrapping twice.
            fn._sa_exclusion_extend._extend(self)
            return fn

        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)
        decorated = decorate(fn)
        decorated._sa_exclusion_extend = self
        return decorated

    @contextlib.contextmanager
    def fail_if(self):
        """Context manager form: treat every skip *and* fail predicate as an
        expected-failure condition for the enclosed block."""
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))

        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)

    def _do(self, config, fn, *args, **kw):
        # Apply skips first; config.skip_test raises the framework's skip.
        for skip in self.skips:
            if skip(config):
                msg = "'%s' : %s" % (
                    fn.__name__,
                    skip._as_string(config)
                )
                config.skip_test(msg)

        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(config, ex, name=fn.__name__)
        else:
            self._expect_success(config, name=fn.__name__)
            return return_value

    def _expect_failure(self, config, ex, name='block'):
        # Swallow the exception only if some fail predicate matches;
        # otherwise re-raise with the original cause preserved.
        for fail in self.fails:
            if fail(config):
                print(("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex))))
                break
        else:
            util.raise_from_cause(ex)

    def _expect_success(self, config, name='block'):
        # A block expected to fail that succeeds is itself an error.
        if not self.fails:
            return
        for fail in self.fails:
            if not fail(config):
                break
        else:
            raise AssertionError(
                "Unexpected success for '%s' (%s)" %
                (
                    name,
                    " and ".join(
                        fail._as_string(config)
                        for fail in self.fails
                    )
                )
            )


def requires_tag(tagname):
    return tags([tagname])


def tags(tagnames):
    comp = compound()
    comp.tags.update(tagnames)
    return comp


def only_if(predicate, reason=None):
    predicate = _as_predicate(predicate)
    return skip_if(NotPredicate(predicate), reason)


def succeeds_if(predicate, reason=None):
    predicate = _as_predicate(predicate)
    return fails_if(NotPredicate(predicate), reason)


class Predicate(object):
    """Base class for config predicates; also the coercion entry point."""

    @classmethod
    def as_predicate(cls, predicate, description=None):
        """Coerce *predicate* (compound, Predicate, list/set, tuple,
        "db op version" string, or callable) into a Predicate instance."""
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.enabled_for_config, description)
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, util.string_types):
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        bool_ = self(config)
        if negate:
            # Fix: negate the evaluated result.  The original read
            # ``bool_ = not negate``, which inside this branch always
            # forced bool_ to False regardless of the predicate.
            bool_ = not bool_
        return self.description % {
            "driver": config.db.url.get_driver_name(),
            "database": config.db.url.get_backend_name(),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()


class BooleanPredicate(Predicate):
    """Predicate with a fixed True/False value."""

    def __init__(self, value, description=None):
        self.value = value
        self.description = description or "boolean %s" % value

    def __call__(self, config):
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)


class SpecPredicate(Predicate):
    """Predicate matching a database spec like ``("mysql", ">=", (5, 6))``
    or ``"postgresql+psycopg2"``."""

    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description

    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db

        # "dialect+driver" targets a specific DBAPI; bare name matches any.
        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None

        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False

        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"

            version = _server_version(engine)
            oper = hasattr(self.op, '__call__') and self.op \
                or self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )


class LambdaPredicate(Predicate):
    """Predicate wrapping a user callable; zero-argument callables are
    adapted to accept the config argument."""

    def __init__(self, lambda_, description=None, args=None, kw=None):
        spec = _getargspec(lambda_)
        if not spec[0]:
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"

    def __call__(self, config):
        return self.lambda_(config)

    def _as_string(self, config, negate=False):
        return self._format_description(config)


class NotPredicate(Predicate):
    """Logical negation of another predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        if self.description:
            return self._format_description(config, not negate)
        else:
            return self.predicate._as_string(config, not negate)


class OrPredicate(Predicate):
    """True if any of the wrapped predicates is true."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        for pred in self.predicates:
            if pred(config):
                return True
        return False

    def _eval_str(self, config, negate=False):
        if negate:
            conjunction = " and "
        else:
            conjunction = " or "
        return conjunction.join(p._as_string(config, negate=negate)
                                for p in self.predicates)

    def _negation_str(self, config):
        if self.description is not None:
            return "Not " + self._format_description(config)
        else:
            return self._eval_str(config, negate=True)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        else:
            if self.description is not None:
                return self._format_description(config)
            else:
                return self._eval_str(config)


_as_predicate = Predicate.as_predicate


def _is_excluded(db, op, spec):
    return SpecPredicate(db, op, spec)(config._current)


def _server_version(engine):
    """Return a server_version_info tuple."""

    # force metadata to be retrieved
    conn = engine.connect()
    version = getattr(engine.dialect, 'server_version_info', ())
    conn.close()
    return version


def db_spec(*dbs):
    return OrPredicate(
        [Predicate.as_predicate(db) for db in dbs]
    )


def open():  # noqa: shadows the builtin, but is part of the public API
    return skip_if(BooleanPredicate(False, "mark as execute"))


def closed():
    return skip_if(BooleanPredicate(True, "marked as skip"))


def fails():
    return fails_if(BooleanPredicate(True, "expected to fail"))


@decorator
def future(fn, *arg):
    return fails_if(LambdaPredicate(fn), "Future feature")


def fails_on(db, reason=None):
    return fails_if(SpecPredicate(db), reason)


def fails_on_everything_except(*dbs):
    return succeeds_if(
        OrPredicate([
            SpecPredicate(db) for db in dbs
        ])
    )


def skip(db, reason=None):
    return skip_if(SpecPredicate(db), reason)


def only_on(dbs, reason=None):
    return only_if(
        OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)])
    )


def exclude(db, op, spec, reason=None):
    return skip_if(SpecPredicate(db, op, spec), reason)


def against(config, *queries):
    assert queries, "no queries sent!"
    return OrPredicate([
        Predicate.as_predicate(query)
        for query in queries
    ])(config)
bsd-3-clause
softDi/clusim
ns3/ns-3.26/src/tap-bridge/bindings/modulegen__gcc_ILP32.py
19
283308
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.tap_bridge', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] 
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate', import_from_module='ns.network') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer', import_from_module='ns.network') ## object-base.h 
(module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## simple-ref-count.h 
(module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper [class] module.add_class('TapBridgeHelper') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): 
ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', 
decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], 
memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## system-thread.h (module 'core'): ns3::SystemThread [class] module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', 
import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', 
parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## data-rate.h (module 'network'): ns3::DataRateChecker [class] module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## data-rate.h (module 'network'): ns3::DataRateValue [class] module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## unix-fd-reader.h (module 'core'): ns3::FdReader [class] module.add_class('FdReader', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', 
parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], 
import_from_module='ns.network') ## net-device.h (module 'network'): ns3::NetDeviceQueue [class] module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class] module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## net-device.h (module 'network'): ns3::QueueItem [class] module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) ## net-device.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration] module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network') ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge [class] module.add_class('TapBridge', parent=root_module['ns3::NetDevice']) ## 
tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::Mode [enumeration] module.add_enum('Mode', ['ILLEGAL', 'CONFIGURE_LOCAL', 'USE_LOCAL', 'USE_BRIDGE'], outer_class=root_module['ns3::TapBridge']) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader [class] module.add_class('TapBridgeFdReader', parent=root_module['ns3::FdReader']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback nested_module = module.add_cpp_namespace('TracedValueCallback') register_types_ns3_TracedValueCallback(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, 
ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_types_ns3_TracedValueCallback(module): root_module = module.get_root() typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, 
ns3::Time ) *&', u'ns3::TracedValueCallback::Time&') def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList']) register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator']) register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate']) register_Ns3EventId_methods(root_module, root_module['ns3::EventId']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address']) register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ObjectFactory_methods(root_module, 
root_module['ns3::ObjectFactory']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TapBridgeHelper_methods(root_module, root_module['ns3::TapBridgeHelper']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) 
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) 
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3DataRateChecker_methods(root_module, 
root_module['ns3::DataRateChecker']) register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl']) register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker']) register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue']) register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface']) register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker']) register_Ns3ObjectFactoryValue_methods(root_module, 
root_module['ns3::ObjectFactoryValue']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem']) register_Ns3TapBridge_methods(root_module, root_module['ns3::TapBridge']) register_Ns3TapBridgeFdReader_methods(root_module, root_module['ns3::TapBridgeFdReader']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool 
ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] 
cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function] cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')]) ## 
buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function] cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function] cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] 
cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function] cls.add_method('GetRemainingSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], 
is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 
'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] 
cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t 
ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', 
[param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] 
cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def register_Ns3DataRate_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRate const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor] 
cls.add_constructor([param('uint64_t', 'bps')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor] cls.add_constructor([param('std::string', 'rate')]) ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBitsTxTime(uint32_t bits) const [member function] cls.add_method('CalculateBitsTxTime', 'ns3::Time', [param('uint32_t', 'bits')], is_const=True) ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBytesTxTime(uint32_t bytes) const [member function] cls.add_method('CalculateBytesTxTime', 'ns3::Time', [param('uint32_t', 'bytes')], is_const=True) ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function] cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], deprecated=True, is_const=True) ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function] cls.add_method('GetBitRate', 'uint64_t', [], is_const=True) return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): 
uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): 
ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 
'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', 
[], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 
'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 
'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h 
(module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 
'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address 
prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * 
address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] 
    # NOTE(review): this file appears to be pybindgen-generated ns-3 Python API
    # definitions (each register_* function wires one C++ class into the Python
    # bindings) — confirm against the generator before hand-editing call order.
    # --- tail of register_Ns3Ipv6Prefix_methods: definition starts before this chunk ---
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    """Register constructors, comparison operators and methods of ns3::Mac48Address on its wrapper class."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register constructors and methods of ns3::NetDeviceContainer on its wrapper class."""
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3ObjectBase_methods(root_module, cls):
    """Register constructors, attribute accessors and trace-connection methods of ns3::ObjectBase."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register constructors and the static Delete method of ns3::ObjectDeleter."""
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectFactory on its wrapper class."""
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return

def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register constructors and methods of ns3::PacketMetadata on its wrapper class."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register constructors and public data members of ns3::PacketMetadata::Item."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register constructors and iteration methods of ns3::PacketMetadata::ItemIterator."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register constructors and iteration methods of ns3::PacketTagIterator."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register constructors and accessors of ns3::PacketTagIterator::Item."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3PacketTagList_methods(root_module, cls):
    """Register constructors and methods of ns3::PacketTagList on its wrapper class."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register constructors and public data members of ns3::PacketTagList::TagData."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 21 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register constructors and the static Cleanup method of the SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Tag_methods(root_module, cls):
    """Register constructors and (mostly pure-virtual) serialization methods of ns3::Tag."""
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3TagBuffer_methods(root_module, cls):
    """Register constructors and read/write methods of ns3::TagBuffer on its wrapper class."""
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3TapBridgeHelper_methods(root_module, cls):
    """Register constructors and the Install/SetAttribute overloads of ns3::TapBridgeHelper."""
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper(ns3::TapBridgeHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TapBridgeHelper const &', 'arg0')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper() [constructor]
    cls.add_constructor([])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper(ns3::Ipv4Address gateway) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'gateway')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(std::string nodeName, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('std::string', 'nodeName'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, std::string ndName) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('std::string', 'ndName')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(std::string nodeName, std::string ndName) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('std::string', 'nodeName'), param('std::string', 'ndName')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::NetDevice> nd, ns3::AttributeValue const & bridgeType) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('ns3::AttributeValue const &', 'bridgeType')])
    ## tap-bridge-helper.h (module 'tap-bridge'): void ns3::TapBridgeHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
    return

def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register constructors and the output-stream operator of ns3::TimeWithUnit."""
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return

def register_Ns3TypeId_methods(root_module, cls):
    """Register constructors, comparison operators and the attribute/trace-source registry methods of ns3::TypeId."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')])
    return

def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register constructors and public data members of ns3::TypeId::AttributeInformation."""
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return

# --- head of register_Ns3TypeIdTraceSourceInformation_methods: definition continues past this chunk ---
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register constructors and public data members of ns3::TypeId::TraceSourceInformation."""
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'):
ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) 
cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] 
cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 
'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member 
function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function] cls.add_method('IsInitialized', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## 
object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, 
ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, 
ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< 
ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor] 
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')]) ## simple-ref-count.h (module 
'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member 
function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SystemThread_methods(root_module, cls): ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemThread const &', 'arg0')]) ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor] cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')]) ## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function] cls.add_method('Equals', 'bool', [param('pthread_t', 'id')], is_static=True) ## system-thread.h (module 'core'): void ns3::SystemThread::Join() 
[member function] cls.add_method('Join', 'void', []) ## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function] cls.add_method('Self', 'pthread_t', [], is_static=True) ## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function] cls.add_method('Start', 'void', []) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 
'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 
'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h 
(module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', 
[param('ns3::Time::Unit', 'unit')], is_const=True)
    return

## NOTE(review): this file appears to be machine-generated pybindgen output
## (API-scan '##' comments precede each registration call) -- prefer
## regenerating the bindings over hand-editing; edits here only add comments.
## Each register_*_methods function binds the constructors and member
## functions of one ns-3 C++ class onto the pybindgen wrapper 'cls'.

## Registers Python bindings for ns3::TraceSourceAccessor (pure-virtual trace connect/disconnect interface).
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::Trailer (abstract packet trailer interface).
def register_Ns3Trailer_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::AttributeAccessor (abstract get/set access to attributes).
def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::AttributeChecker (abstract attribute-value validation).
def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::AttributeValue (abstract attribute value with string (de)serialization).
def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::CallbackChecker (constructors only).
def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::CallbackImplBase (callback implementation base).
def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

## Registers Python bindings for ns3::CallbackValue (AttributeValue wrapper around a CallbackBase).
def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

## Registers Python bindings for ns3::DataRateChecker (constructors only).
def register_Ns3DataRateChecker_methods(root_module, cls):
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::DataRateValue (AttributeValue wrapper around a DataRate).
def register_Ns3DataRateValue_methods(root_module, cls):
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'value')])
    ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
    cls.add_method('Get', 'ns3::DataRate', [], is_const=True)
    ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')])
    return

## Registers Python bindings for ns3::EmptyAttributeAccessor (no-op accessor).
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::EmptyAttributeChecker (no-op checker).
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True)
    return

## Registers Python bindings for ns3::EmptyAttributeValue (no-op value; overrides are private).
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

## Registers Python bindings for ns3::EventImpl (scheduler event: cancel/invoke, protected Notify).
def register_Ns3EventImpl_methods(root_module, cls):
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

## Registers Python bindings for ns3::FdReader (file-descriptor reader with callback-driven Start/Stop).
def register_Ns3FdReader_methods(root_module, cls):
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
    cls.add_constructor([])
    ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function]
    cls.add_method('Start', 'void', [param('int', 'fd'), param('ns3::Callback< void, unsigned char *, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
    ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
    cls.add_method('Stop', 'void', [])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
    cls.add_method('DoRead', 'ns3::FdReader::Data', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

## Registers Python bindings for ns3::Ipv4AddressChecker (constructors only).
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::Ipv4AddressValue (AttributeValue wrapper around an Ipv4Address).
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

## Registers Python bindings for ns3::Ipv4MaskChecker (constructors only).
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::Ipv4MaskValue (AttributeValue wrapper around an Ipv4Mask).
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

## Registers Python bindings for ns3::Ipv6AddressChecker (constructors only).
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::Ipv6AddressValue (AttributeValue wrapper around an Ipv6Address).
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

## Registers Python bindings for ns3::Ipv6PrefixChecker (constructors only).
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::Ipv6PrefixValue (AttributeValue wrapper around an Ipv6Prefix).
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

## Registers Python bindings for ns3::Mac48AddressChecker (constructors only).
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

## Registers Python bindings for ns3::Mac48AddressValue (AttributeValue wrapper around a Mac48Address).
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

## Registers Python bindings for ns3::NetDevice (abstract network device interface).
## NOTE(review): this definition continues beyond the reviewed region.
def register_Ns3NetDevice_methods(root_module, cls):
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True,
is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], 
is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NetDeviceQueue_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::Ptr<ns3::QueueLimits> ns3::NetDeviceQueue::GetQueueLimits() [member function] cls.add_method('GetQueueLimits', 'ns3::Ptr< ns3::QueueLimits >', []) ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function] cls.add_method('IsStopped', 'bool', [], is_const=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyQueuedBytes(uint32_t bytes) [member function] cls.add_method('NotifyQueuedBytes', 'void', [param('uint32_t', 'bytes')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyTransmittedBytes(uint32_t bytes) [member function] cls.add_method('NotifyTransmittedBytes', 'void', [param('uint32_t', 'bytes')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::ResetQueueLimits() [member function] cls.add_method('ResetQueueLimits', 'void', []) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetQueueLimits(ns3::Ptr<ns3::QueueLimits> ql) [member function] cls.add_method('SetQueueLimits', 'void', [param('ns3::Ptr< ns3::QueueLimits >', 'ql')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetWakeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## net-device.h (module 'network'): void 
ns3::NetDeviceQueue::Start() [member function] cls.add_method('Start', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function] cls.add_method('Stop', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function] cls.add_method('Wake', 'void', [], is_virtual=True) return def register_Ns3NetDeviceQueueInterface_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::CreateTxQueues() [member function] cls.add_method('CreateTxQueues', 'void', []) ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetNTxQueues() const [member function] cls.add_method('GetNTxQueues', 'uint8_t', [], is_const=True) ## net-device.h (module 'network'): ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::NetDeviceQueueInterface::GetSelectQueueCallback() const [member function] cls.add_method('GetSelectQueueCallback', 'ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member function] cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function] 
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetSelectQueueCallback', 'void', [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function] cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t 
ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 
'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function] cls.add_method('GetLocalTime', 'ns3::Time', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address 
const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def 
register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] 
cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] 
cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] 
cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')]) ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function] cls.add_method('ToString', 
'std::string', [], is_const=True) return def register_Ns3QueueItem_methods(root_module, cls): cls.add_output_stream_operator() ## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')]) ## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function] cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function] cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::QueueItem::GetUint8Value(ns3::QueueItem::Uint8Values field, uint8_t & value) const [member function] cls.add_method('GetUint8Value', 'bool', [param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')], is_const=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) return def register_Ns3TapBridge_methods(root_module, cls): ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::TapBridge(ns3::TapBridge const & arg0) [copy constructor] cls.add_constructor([param('ns3::TapBridge const &', 'arg0')]) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::TapBridge() [constructor] cls.add_constructor([]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): 
ns3::Address ns3::TapBridge::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridge::GetBridgedNetDevice() [member function] cls.add_method('GetBridgedNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::Channel> ns3::TapBridge::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): uint32_t ns3::TapBridge::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::Mode ns3::TapBridge::GetMode() [member function] cls.add_method('GetMode', 'ns3::TapBridge::Mode', []) ## tap-bridge.h (module 'tap-bridge'): uint16_t ns3::TapBridge::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::Node> ns3::TapBridge::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): static 
ns3::TypeId ns3::TapBridge::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetAddress(ns3::Address address) [member function] 
cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetBridgedNetDevice(ns3::Ptr<ns3::NetDevice> bridgedDevice) [member function] cls.add_method('SetBridgedNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'bridgedDevice')]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetMode(ns3::TapBridge::Mode mode) [member function] cls.add_method('SetMode', 'void', [param('ns3::TapBridge::Mode', 'mode')]) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] 
cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::Start(ns3::Time tStart) [member function] cls.add_method('Start', 'void', [param('ns3::Time', 'tStart')]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::Stop(ns3::Time tStop) [member function] cls.add_method('Stop', 'void', [param('ns3::Time', 'tStop')]) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::DiscardFromBridgedDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & src) [member function] cls.add_method('DiscardFromBridgedDevice', 'bool', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'src')], visibility='protected') ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::ReceiveFromBridgedDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & src, ns3::Address const & dst, ns3::NetDevice::PacketType packetType) [member function] cls.add_method('ReceiveFromBridgedDevice', 'bool', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'src'), param('ns3::Address const &', 'dst'), param('ns3::NetDevice::PacketType', 'packetType')], 
visibility='protected') return def register_Ns3TapBridgeFdReader_methods(root_module, cls): ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader::TapBridgeFdReader() [constructor] cls.add_constructor([]) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader::TapBridgeFdReader(ns3::TapBridgeFdReader const & arg0) [copy constructor] cls.add_constructor([param('ns3::TapBridgeFdReader const &', 'arg0')]) ## tap-bridge.h (module 'tap-bridge'): ns3::FdReader::Data ns3::TapBridgeFdReader::DoRead() [member function] cls.add_method('DoRead', 'ns3::FdReader::Data', [], visibility='private', is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 
'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< 
ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], 
is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t 
const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): 
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
apache-2.0
beernarrd/gramps
gramps/gen/filters/rules/family/_isbookmarked.py
5
1841
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext

#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule

#-------------------------------------------------------------------------
#
# IsBookmarked
#
#-------------------------------------------------------------------------
class IsBookmarked(Rule):
    """Filter rule matching every family on the database bookmark list."""

    name = _('Bookmarked families')
    category = _('General filters')
    description = _("Matches the families on the bookmark list")

    def prepare(self, db, user):
        """Cache the bookmarked family handles once before filtering starts."""
        bookmark_collection = db.get_family_bookmarks()
        self.bookmarks = bookmark_collection.get()

    def apply(self, db, family):
        """Return True when *family*'s handle is in the cached bookmark set."""
        handle = family.get_handle()
        return handle in self.bookmarks
gpl-2.0
glennhickey/teHmm
scripts/multibench.py
1
4173
#!/usr/bin/env python

#Copyright (C) 2014 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt

# Benchmark driver: runs the same TE-annotation experiment three ways
# (supervised, semi-supervised, unsupervised) by shelling out to the
# teHmm command-line tools, then compares each result against a truth BED.
# NOTE(review): several imports (unittest, argparse, logging, random, numpy)
# and the variables `thresh` / `sedExp` appear unused here -- presumably
# leftovers from a related script; confirm before removing.
import unittest
import sys
import os
import argparse
import logging
import random
import numpy as np

from teHmm.common import runShellCommand, setLogLevel, addLoggingFileHandler

setLogLevel("INFO")
addLoggingFileHandler("log.txt", False)

# input
############
# Paths to the track definitions, target region, truth annotation and
# RepeatModeler-derived training annotation used by every experiment below.
tracksPath="tracks.xml"
tracksPath250="tracks_bin250.xml"
regionPath="region.bed"
truthPath="hollister_region.bed"
trainPath="modeler_region.bed"
superTrackName="repeat_modeler"
segOpts = "--cutMultinomial --thresh 2"
teStates = ["LINE", "SINE", "LTR", "DNA", "RC", "Unknown"]
trainPath2State = "modeler_2state_region.bed"
# Core HMM hyperparameters shared by the semi- and unsupervised runs.
segLen = 20
numStates = 35
threads = 6
iter = 200
thresh = 0.08
emPrior = 1.0
mpFlags = "--maxProb --maxProbCut 5"
fitFlags = "--ignoreTgt 0 --qualThresh 0.25"
#####################

superTrackPath="tracks_super.xml"
segPath = "segments.bed"
sedExp = "\"s/" + "\\|".join(teStates) + "/TE/g\""

# Collapse the multi-state training annotation to a 2-state (TE / non-TE) BED.
runShellCommand("rm2State.sh %s > %s" % (trainPath, trainPath2State))

# make a supervised training track
# (drop the repeat_modeler track so it cannot leak into the features)
runShellCommand("grep -v %s %s > %s" % (superTrackName, tracksPath250,
                                        superTrackPath))

# make a segments
runShellCommand("segmentTracks.py %s %s %s %s --logInfo --logFile log.txt" % (
    tracksPath, regionPath, segPath, segOpts))

# do a supervised
# Train directly on the labelled annotation, evaluate, then score both the
# raw 2-state mapping and a name-fitted ("cheat") version against truth.
runShellCommand("mkdir -p supervised")
runShellCommand("teHmmTrain.py %s %s supervised/out.mod --segment %s --supervised --segLen %d --logInfo" % (
    superTrackPath, trainPath, segPath, segLen))
runShellCommand("teHmmEval.py %s %s %s --bed %s" % (
    superTrackPath, "supervised/out.mod", segPath, "supervised/eval.bed"))
runShellCommand("rm2State.sh %s > %s" % ("supervised/eval.bed",
                                         "supervised/evalTE.bed"))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath,
                                                    "supervised/evalTE.bed",
                                                    "supervised/comp.txt"))
runShellCommand("fitStateNames.py %s %s %s %s" % (truthPath,
                                                  "supervised/eval.bed",
                                                  "supervised/fit.bed",
                                                  fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath,
                                                    "supervised/fit.bed",
                                                    "supervised/comp_cheat.txt"))

# do a semisupervised
# Seed the model from the repeat_modeler track, then run EM with the seeded
# transition/emission matrices pinned via init/force options.
runShellCommand("mkdir -p semi")
runShellCommand("createStartingModel.py %s %s %s %s %s --numTot %d --mode full --em %f --outName Unlabeled" % (
    tracksPath, superTrackName, regionPath, "semi/tran.txt", "semi/em.txt",
    numStates, emPrior))
# Forced transitions exclude the catch-all Unlabeled state.
runShellCommand("grep -v Unlabeled semi/tran.txt > semi/tranf.txt")
runShellCommand("teHmmBenchmark.py %s %s %s --truth %s --iter %d %s --transMatEpsilons --segment --segLen %d --fit --reps %d --numThreads %d --logInfo --fixStart --initTransProbs %s --forceTransProbs %s --initEmProbs %s --forceEmProbs %s --fitOpts \"%s\" " % (
    tracksPath250, "semi/bench", segPath, truthPath, iter, mpFlags, segLen,
    threads, threads, "semi/tran.txt", "semi/tranf.txt", "semi/em.txt",
    "semi/em.txt", fitFlags))
evalPath = "semi/bench/" + segPath[:-4] + "_eval.bed"
compPath = "semi/bench/" + segPath[:-4] + "_comp.txt"
runShellCommand("rm2State.sh %s > %s" % (evalPath, "semi/eval1.bed"))
runShellCommand("fitStateNames.py %s %s %s %s" % (trainPath2State,
                                                  "semi/eval1.bed",
                                                  "semi/fit1.bed", fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath,
                                                    "semi/fit1.bed",
                                                    "semi/comp.txt"))
runShellCommand("cp %s %s" % (compPath, "semi/comp_cheat.txt"))

# do a unsupervised
# Same benchmark without any seeding: just `numStates` free emission states.
runShellCommand("mkdir -p unsup")
runShellCommand("teHmmBenchmark.py %s %s %s --truth %s --iter %d %s --maxProb --maxProbCut 5 --segment --segLen %s --fit --reps %d --numThreads %d --logInfo --fixStart --emStates %s --fitOpts \"%s\"" % (
    tracksPath250, "unsup/bench", segPath, truthPath, iter, mpFlags, segLen,
    threads, threads, numStates, fitFlags))
evalPath = "unsup/bench/" + segPath[:-4] + "_eval.bed"
compPath = "unsup/bench/" + segPath[:-4] + "_comp.txt"
runShellCommand("rm2State.sh %s > %s" % (evalPath, "unsup/eval1.bed"))
runShellCommand("fitStateNames.py %s %s %s %s" % (trainPath2State,
                                                  "unsup/eval1.bed",
                                                  "unsup/fit1.bed", fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath,
                                                    "unsup/fit1.bed",
                                                    "unsup/comp.txt"))
runShellCommand("cp %s %s" % (compPath, "unsup/comp_cheat.txt"))
mit
analurandis/Tur
backend/venv/Lib/site-packages/unidecode/x01e.py
246
3853
data = ( 'A', # 0x00 'a', # 0x01 'B', # 0x02 'b', # 0x03 'B', # 0x04 'b', # 0x05 'B', # 0x06 'b', # 0x07 'C', # 0x08 'c', # 0x09 'D', # 0x0a 'd', # 0x0b 'D', # 0x0c 'd', # 0x0d 'D', # 0x0e 'd', # 0x0f 'D', # 0x10 'd', # 0x11 'D', # 0x12 'd', # 0x13 'E', # 0x14 'e', # 0x15 'E', # 0x16 'e', # 0x17 'E', # 0x18 'e', # 0x19 'E', # 0x1a 'e', # 0x1b 'E', # 0x1c 'e', # 0x1d 'F', # 0x1e 'f', # 0x1f 'G', # 0x20 'g', # 0x21 'H', # 0x22 'h', # 0x23 'H', # 0x24 'h', # 0x25 'H', # 0x26 'h', # 0x27 'H', # 0x28 'h', # 0x29 'H', # 0x2a 'h', # 0x2b 'I', # 0x2c 'i', # 0x2d 'I', # 0x2e 'i', # 0x2f 'K', # 0x30 'k', # 0x31 'K', # 0x32 'k', # 0x33 'K', # 0x34 'k', # 0x35 'L', # 0x36 'l', # 0x37 'L', # 0x38 'l', # 0x39 'L', # 0x3a 'l', # 0x3b 'L', # 0x3c 'l', # 0x3d 'M', # 0x3e 'm', # 0x3f 'M', # 0x40 'm', # 0x41 'M', # 0x42 'm', # 0x43 'N', # 0x44 'n', # 0x45 'N', # 0x46 'n', # 0x47 'N', # 0x48 'n', # 0x49 'N', # 0x4a 'n', # 0x4b 'O', # 0x4c 'o', # 0x4d 'O', # 0x4e 'o', # 0x4f 'O', # 0x50 'o', # 0x51 'O', # 0x52 'o', # 0x53 'P', # 0x54 'p', # 0x55 'P', # 0x56 'p', # 0x57 'R', # 0x58 'r', # 0x59 'R', # 0x5a 'r', # 0x5b 'R', # 0x5c 'r', # 0x5d 'R', # 0x5e 'r', # 0x5f 'S', # 0x60 's', # 0x61 'S', # 0x62 's', # 0x63 'S', # 0x64 's', # 0x65 'S', # 0x66 's', # 0x67 'S', # 0x68 's', # 0x69 'T', # 0x6a 't', # 0x6b 'T', # 0x6c 't', # 0x6d 'T', # 0x6e 't', # 0x6f 'T', # 0x70 't', # 0x71 'U', # 0x72 'u', # 0x73 'U', # 0x74 'u', # 0x75 'U', # 0x76 'u', # 0x77 'U', # 0x78 'u', # 0x79 'U', # 0x7a 'u', # 0x7b 'V', # 0x7c 'v', # 0x7d 'V', # 0x7e 'v', # 0x7f 'W', # 0x80 'w', # 0x81 'W', # 0x82 'w', # 0x83 'W', # 0x84 'w', # 0x85 'W', # 0x86 'w', # 0x87 'W', # 0x88 'w', # 0x89 'X', # 0x8a 'x', # 0x8b 'X', # 0x8c 'x', # 0x8d 'Y', # 0x8e 'y', # 0x8f 'Z', # 0x90 'z', # 0x91 'Z', # 0x92 'z', # 0x93 'Z', # 0x94 'z', # 0x95 'h', # 0x96 't', # 0x97 'w', # 0x98 'y', # 0x99 'a', # 0x9a 'S', # 0x9b '[?]', # 0x9c '[?]', # 0x9d 'Ss', # 0x9e '[?]', # 0x9f 'A', # 0xa0 'a', # 0xa1 'A', # 0xa2 'a', # 0xa3 'A', # 0xa4 
'a', # 0xa5 'A', # 0xa6 'a', # 0xa7 'A', # 0xa8 'a', # 0xa9 'A', # 0xaa 'a', # 0xab 'A', # 0xac 'a', # 0xad 'A', # 0xae 'a', # 0xaf 'A', # 0xb0 'a', # 0xb1 'A', # 0xb2 'a', # 0xb3 'A', # 0xb4 'a', # 0xb5 'A', # 0xb6 'a', # 0xb7 'E', # 0xb8 'e', # 0xb9 'E', # 0xba 'e', # 0xbb 'E', # 0xbc 'e', # 0xbd 'E', # 0xbe 'e', # 0xbf 'E', # 0xc0 'e', # 0xc1 'E', # 0xc2 'e', # 0xc3 'E', # 0xc4 'e', # 0xc5 'E', # 0xc6 'e', # 0xc7 'I', # 0xc8 'i', # 0xc9 'I', # 0xca 'i', # 0xcb 'O', # 0xcc 'o', # 0xcd 'O', # 0xce 'o', # 0xcf 'O', # 0xd0 'o', # 0xd1 'O', # 0xd2 'o', # 0xd3 'O', # 0xd4 'o', # 0xd5 'O', # 0xd6 'o', # 0xd7 'O', # 0xd8 'o', # 0xd9 'O', # 0xda 'o', # 0xdb 'O', # 0xdc 'o', # 0xdd 'O', # 0xde 'o', # 0xdf 'O', # 0xe0 'o', # 0xe1 'O', # 0xe2 'o', # 0xe3 'U', # 0xe4 'u', # 0xe5 'U', # 0xe6 'u', # 0xe7 'U', # 0xe8 'u', # 0xe9 'U', # 0xea 'u', # 0xeb 'U', # 0xec 'u', # 0xed 'U', # 0xee 'u', # 0xef 'U', # 0xf0 'u', # 0xf1 'Y', # 0xf2 'y', # 0xf3 'Y', # 0xf4 'y', # 0xf5 'Y', # 0xf6 'y', # 0xf7 'Y', # 0xf8 'y', # 0xf9 '[?]', # 0xfa '[?]', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
mit
slurps-mad-rips/color
color/__init__.py
1
2118
# Cross-platform colored ``print``: on win32 it flips the console text
# attribute via kernel32; elsewhere it wraps output in ANSI escape codes.
from contextlib import contextmanager
import builtins
import sys

__all__ = ['print']

# There's quite a bit of setup for windows, but oh well.
if sys.platform == 'win32':
    from ctypes import windll, Structure, byref
    from ctypes import c_ushort as ushort, c_short as short

    # ctypes mirror of the Win32 COORD struct.
    class Coord(Structure):
        _fields_ = [('x', short), ('y', short)]

    # ctypes mirror of the Win32 SMALL_RECT struct.
    class SmallRect(Structure):
        _fields_ = [(x, short) for x in ('left', 'top', 'right', 'bottom')]

    # ctypes mirror of CONSOLE_SCREEN_BUFFER_INFO; only ``attributes``
    # (the current color flags) is read below.
    class ConsoleScreenBufferInfo(Structure):
        _fields_ = [
            ('size', Coord),
            ('cursor_position', Coord),
            ('attributes', ushort),
            ('window', SmallRect),
            ('maximum_window_size', Coord)
        ]

    # STDOUT and STDERR use the same attributes
    # -11 is the Win32 STD_OUTPUT_HANDLE constant.
    __handle = windll.kernel32.GetStdHandle(-11)

    def __get_console():
        """Return the console's current text-attribute flags."""
        csbi = ConsoleScreenBufferInfo()
        windll.kernel32.GetConsoleScreenBufferInfo(__handle, byref(csbi))
        return csbi.attributes

    set_console = windll.kernel32.SetConsoleTextAttribute
    # Captured once at import so colors can be restored afterwards.
    default_colors = __get_console()

__windows = sys.platform == 'win32'
# Win32 attribute codes in the same order as __names below.
# NOTE(review): by Python precedence this is ``color | (0x8 & 0xFF0F)``,
# i.e. simply ``color | 8`` (the intensity bit) -- confirm whether
# ``(color | 0x8) & 0xFF0F`` was intended; the result happens to match here.
__win32 = [color | 0x8 & 0xFF0F for color in [0, 4, 2, 6, 1, 5, 3, 7]]
__names = 'grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
# Color name -> platform-specific code (win32 attribute or ANSI digit 0-7).
__colors = dict(zip(__names, __win32 if __windows else range(8)))

def print(*args, **kwargs):
    """Drop-in replacement for builtins.print accepting a ``color=`` keyword.

    Without ``color`` it delegates straight to builtins.print.  With it,
    the output is wrapped in a color set/reset pair.
    NOTE(review): when ``color`` is given, only ``file``/``sep``/``end``
    are forwarded -- other keywords such as ``flush`` are silently
    dropped; confirm whether that is intentional.
    """
    if 'color' not in kwargs:
        builtins.print(*args, **kwargs)
        return
    color = kwargs['color']
    if color not in __colors:
        raise AttributeError('invalid color: {}'.format(color))
    color = __colors[kwargs['color']]
    file = kwargs.get('file', sys.stdout)
    sep = kwargs.get('sep', ' ')
    end = kwargs.get('end', '\n')
    with __setup_color(color, file):
        builtins.print(*args, sep=sep, end=end, file=file)

@contextmanager
def __setup_color(color, file):
    """Context manager: enable *color* on entry, restore defaults on exit.

    NOTE(review): the restore code is not in a ``finally`` block, so an
    exception while printing would leave the color active.
    """
    if __windows:
        set_console(__handle, color)
    else:
        # 1;3{n} = bold + foreground color n (ANSI SGR).
        builtins.print('\33[1;3{}m'.format(color), end='', file=file)
    yield
    if __windows:
        set_console(__handle, default_colors)
    else:
        # SGR 0 resets all attributes.
        builtins.print('\33[0m', end='', file=file)
bsd-2-clause
Javiercerna/MissionPlanner
Lib/sndhdr.py
63
6201
"""Routines to help recognizing sound files. Function whathdr() recognizes various types of sound file headers. It understands almost all headers that SOX can decode. The return tuple contains the following items, in this order: - file type (as SOX understands it) - sampling rate (0 if unknown or hard to decode) - number of channels (0 if unknown or hard to decode) - number of frames in the file (-1 if unknown or hard to decode) - number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW If the file doesn't have a recognizable type, it returns None. If the file can't be opened, IOError is raised. To compute the total time, divide the number of frames by the sampling rate (a frame contains a sample for each channel). Function what() calls whathdr(). (It used to also use some heuristics for raw data, but this doesn't work very well.) Finally, the function test() is a simple main program that calls what() for all files mentioned on the argument list. For directory arguments it calls what() for all files in that directory. Default argument is "." (testing all files in the current directory). The option -r tells it to recurse down directories found inside explicitly given directories. """ # The file structure is top-down except that the test program and its # subroutine come last. 
# NOTE: this is Python 2 code (print statements, str-based byte handling).
__all__ = ["what","whathdr"]

def what(filename):
    """Guess the type of a sound file"""
    # Currently just delegates to the header-based recognizer.
    res = whathdr(filename)
    return res

def whathdr(filename):
    """Recognize sound headers"""
    # Each recognizer in ``tests`` inspects the first 512 bytes (and may
    # reread via ``f``); the first non-None result wins.
    # NOTE(review): ``f`` is never closed -- relies on refcounting/GC.
    f = open(filename, 'rb')
    h = f.read(512)
    for tf in tests:
        res = tf(h, f)
        if res:
            return res
    return None

#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#

# Registry of recognizer functions; each takes (header_bytes, file_obj) and
# returns a (type, rate, nchannels, nframes, bits) tuple or None.
tests = []

def test_aifc(h, f):
    """Recognize AIFF/AIFF-C files via the aifc module."""
    import aifc
    if h[:4] != 'FORM':
        return None
    if h[8:12] == 'AIFC':
        fmt = 'aifc'
    elif h[8:12] == 'AIFF':
        fmt = 'aiff'
    else:
        return None
    # Let the aifc module parse the full header from the start of the file.
    f.seek(0)
    try:
        a = aifc.openfp(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(), \
            a.getnframes(), 8*a.getsampwidth())

tests.append(test_aifc)

def test_au(h, f):
    """Recognize Sun/NeXT .au files (big- or little-endian magic)."""
    # ``f`` is rebound to the matching byte-order long reader here.
    if h[:4] == '.snd':
        f = get_long_be
    elif h[:4] in ('\0ds.', 'dns.'):
        f = get_long_le
    else:
        return None
    type = 'au'
    hdr_size = f(h[4:8])
    data_size = f(h[8:12])
    encoding = f(h[12:16])
    rate = f(h[16:20])
    nchannels = f(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        sample_bits = 'U'   # 8-bit u-law
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    return type, rate, nchannels, data_size//frame_size, sample_bits

tests.append(test_au)

def test_hcom(h, f):
    """Recognize Mac FSSD/HCOM files."""
    if h[65:69] != 'FSSD' or h[128:132] != 'HCOM':
        return None
    # Sample rate is 22050 divided by the stored divisor.
    divisor = get_long_be(h[128+16:128+20])
    return 'hcom', 22050//divisor, 1, -1, 8

tests.append(test_hcom)

def test_voc(h, f):
    """Recognize Creative Labs .voc files."""
    if h[:20] != 'Creative Voice File\032':
        return None
    sbseek = get_short_le(h[20:22])
    rate = 0
    # Only decode the rate if the first data block is within our 512-byte
    # window and is a type-1 (sound data) block.
    if 0 <= sbseek < 500 and h[sbseek] == '\1':
        ratecode = ord(h[sbseek+4])
        rate = int(1000000.0 / (256 - ratecode))
    return 'voc', rate, 1, -1, 8

tests.append(test_voc)

def test_wav(h, f):
    """Recognize RIFF/WAVE files."""
    # 'RIFF' <len> 'WAVE' 'fmt ' <len>
    if h[:4] != 'RIFF' or h[8:12] != 'WAVE' or h[12:16] != 'fmt ':
        return None
    style = get_short_le(h[20:22])
    nchannels = get_short_le(h[22:24])
    rate = get_long_le(h[24:28])
    sample_bits = get_short_le(h[34:36])
    return 'wav', rate, nchannels, -1, sample_bits

tests.append(test_wav)

def test_8svx(h, f):
    """Recognize Amiga IFF 8SVX files."""
    if h[:4] != 'FORM' or h[8:12] != '8SVX':
        return None
    # Should decode it to get #channels -- assume always 1
    return '8svx', 0, 1, 0, 8

tests.append(test_8svx)

def test_sndt(h, f):
    """Recognize SndTool .sndt files (implicitly returns None on no match)."""
    if h[:5] == 'SOUND':
        nsamples = get_long_le(h[8:12])
        rate = get_short_le(h[20:22])
        return 'sndt', rate, 1, nsamples, 8

tests.append(test_sndt)

def test_sndr(h, f):
    """Recognize .sndr files by their zero magic plus a plausible rate."""
    if h[:2] == '\0\0':
        rate = get_short_le(h[2:4])
        if 4000 <= rate <= 25000:
            return 'sndr', rate, 1, -1, 8

tests.append(test_sndr)

#---------------------------------------------#
# Subroutines to extract numbers from strings #
#---------------------------------------------#

def get_long_be(s):
    """Decode a 4-byte big-endian unsigned int from a byte string."""
    return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])

def get_long_le(s):
    """Decode a 4-byte little-endian unsigned int from a byte string."""
    return (ord(s[3])<<24) | (ord(s[2])<<16) | (ord(s[1])<<8) | ord(s[0])

def get_short_be(s):
    """Decode a 2-byte big-endian unsigned int from a byte string."""
    return (ord(s[0])<<8) | ord(s[1])

def get_short_le(s):
    """Decode a 2-byte little-endian unsigned int from a byte string."""
    return (ord(s[1])<<8) | ord(s[0])

#--------------------#
# Small test program #
#--------------------#

def test():
    """Command-line driver: run what() over the given files/directories."""
    import sys
    recursive = 0
    # ``-r`` must be the first argument; it enables directory recursion.
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        if sys.argv[1:]:
            testall(sys.argv[1:], recursive, 1)
        else:
            testall(['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)

def testall(list, recursive, toplevel):
    """Print the recognized type for each file; recurse into directories
    when ``recursive`` is set (top-level directories are always expanded)."""
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print filename + '/:',
            if recursive or toplevel:
                print 'recursing down:'
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print '*** directory (use -r) ***'
        else:
            print filename + ':',
            sys.stdout.flush()
            try:
                print what(filename)
            except IOError:
                print '*** not found ***'

if __name__ == '__main__':
    test()
gpl-3.0
Jeff-Tian/mybnb
Python27/Lib/lib-tk/test/test_tkinter/test_text.py
3
1515
import unittest
import Tkinter as tkinter
from test.test_support import requires, run_unittest
from test_ttk.support import AbstractTkTest

requires('gui')


class TextTest(AbstractTkTest, unittest.TestCase):
    """Tests for the tkinter.Text widget."""

    def setUp(self):
        super(TextTest, self).setUp()
        self.text = tkinter.Text(self.root)

    def test_debug(self):
        widget = self.text
        saved = widget.debug()
        try:
            widget.debug(0)
            self.assertEqual(widget.debug(), 0)
            widget.debug(1)
            self.assertEqual(widget.debug(), 1)
        finally:
            # Always restore the widget's original debug setting.
            widget.debug(saved)
            self.assertEqual(widget.debug(), saved)

    def test_search(self):
        widget = self.text

        # pattern and index are obligatory arguments.
        for bad_args in ((None, '1.0'), ('a', None), (None, None)):
            self.assertRaises(tkinter.TclError, widget.search, *bad_args)

        # Invalid text index.
        self.assertRaises(tkinter.TclError, widget.search, '', 0)

        # Check if we are getting the indices as strings -- you are likely
        # to get Tcl_Obj under Tk 8.5 if Tkinter doesn't convert it.
        widget.insert('1.0', 'hi-test')
        self.assertEqual(widget.search('-test', '1.0', 'end'), '1.2')
        self.assertEqual(widget.search('test', '1.0', 'end'), '1.3')


tests_gui = (TextTest, )

if __name__ == "__main__":
    run_unittest(*tests_gui)
apache-2.0
fearlessspider/python-social-auth
social/backends/lastfm.py
70
1888
import hashlib

from social.utils import handle_http_errors
from social.backends.base import BaseAuth


class LastFmAuth(BaseAuth):
    """
    Last.Fm authentication backend. Requires two settings:
        SOCIAL_AUTH_LASTFM_KEY
        SOCIAL_AUTH_LASTFM_SECRET

    Don't forget to set the Last.fm callback to something sensible like
    http://your.site/lastfm/complete
    """
    name = 'lastfm'
    AUTH_URL = 'http://www.last.fm/api/auth/?api_key={api_key}'
    EXTRA_DATA = [
        ('key', 'session_key')
    ]

    def auth_url(self):
        # Send the user to Last.fm's authorization page for this API key.
        return self.AUTH_URL.format(api_key=self.setting('KEY'))

    @handle_http_errors
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        key, secret = self.get_key_and_secret()
        token = self.data['token']
        # Last.fm API signature: md5 over the concatenated parameter
        # name/value pairs followed by the shared secret.
        raw_signature = ('api_key' + key + 'methodauth.getSession' +
                         'token' + token + secret)
        signature = hashlib.md5(raw_signature.encode()).hexdigest()
        response = self.get_json('http://ws.audioscrobbler.com/2.0/', data={
            'method': 'auth.getSession',
            'api_key': key,
            'token': token,
            'api_sig': signature,
            'format': 'json'
        }, method='POST')
        kwargs.update({'response': response['session'], 'backend': self})
        return self.strategy.authenticate(*args, **kwargs)

    def get_user_id(self, details, response):
        """Return a unique ID for the current user, by default from server
        response."""
        return response.get('name')

    def get_user_details(self, response):
        username = response['name']
        fullname, first_name, last_name = self.get_user_names(username)
        return {
            'username': username,
            'email': '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }
bsd-3-clause
RedHat-Eng-PGM/python-schedules-tools
schedules_tools/schedule_handlers/__init__.py
1
3014
# NOTE(review): pytz.utc was replaced by the stdlib datetime.timezone.utc --
# astimezone(timezone.utc) is behaviorally identical here and drops the
# third-party dependency.
from datetime import datetime, timezone
import logging

log = logging.getLogger(__name__)


# Handler implementation must be in format ScheduleHandler_format
# where 'format' is used as a uniq label for the format and
# 'ScheduleHandler' can be whatever.
class ScheduleHandlerBase(object):
    """Base class for schedule format handlers (import/export/changelog)."""

    # 'handle' is the source/target of the schedule in general
    # (e.g. file path, link, smartsheet id).
    handle = None
    schedule = None

    # This flag indicate ability to export internal intermediate structure
    # (Schedule) into format of implementation. It's read by ScheduleConverter
    # during autodiscovery and used to provide actual help message in CLI
    # TODO: add provide_import to be complete?
    provide_export = False
    provide_changelog = False
    provide_mtime = False

    options = {}

    default_export_ext = None

    # Handlers can depend on additional python modules. We don't require from
    # users to have all of them installed if they aren't used.
    # This flag indicates that the handler can be fully utilized and there is
    # no missing dependent packages installed.
    handle_deps_satisfied = False

    def __init__(self, handle=None, schedule=None, options=None):
        """Store schedule/options and set the handle last.

        BUGFIX: the default used to be ``options=dict()`` -- a mutable
        default argument shared by every instance constructed without
        explicit options. ``None`` is normalized to a fresh dict instead.
        """
        self.schedule = schedule
        self.options = {} if options is None else options
        # set handle last - there might be custom processing that requires
        # options to already be set
        self.handle = handle

    def _write_to_file(self, content, filename):
        """Write stripped content into filename, UTF-8 encoded."""
        with open(filename, 'wb') as fp:
            fp.write(content.strip().encode('UTF-8'))

    def get_handle_mtime(self):
        """Implement only if schedule handler is able to get mtime directly
        without storage."""
        raise NotImplementedError

    def handle_modified_since(self, mtime):
        """Return boolean to be able to bypass processing.

        Returns False only when the handle mtime is known and is not newer
        than the given mtime; in every other case returns True (assume
        modified).
        """
        modified = True

        if isinstance(mtime, datetime):
            # BUGFIX: pre-initialize handle_mtime. Previously it was left
            # unbound when get_handle_mtime() raised NotImplementedError,
            # causing a NameError on the truthiness check below.
            handle_mtime = None
            try:
                handle_mtime = self.get_handle_mtime()
            except NotImplementedError:
                pass

            # we're working with TZ naive dates (but in UTC)
            if handle_mtime:
                if handle_mtime.tzinfo is not None:
                    handle_mtime = (handle_mtime
                                    .astimezone(timezone.utc)
                                    .replace(tzinfo=None))

                if handle_mtime <= mtime:
                    modified = False

        return modified

    def get_handle_changelog(self):
        raise NotImplementedError

    # handle - file/link/smartsheet id
    def import_schedule(self):
        raise NotImplementedError

    def export_schedule(self):
        raise NotImplementedError

    def build_schedule(self):
        raise NotImplementedError

    @classmethod
    def is_valid_source(cls, handle=None):
        """Method returns True, if the specific handler is able to work
        with given handle."""
        return False

    def extract_backup(self, handle=None):
        """Prepare files which need a backup in case of external source."""
        return []
gpl-3.0
azurestandard/django
tests/regressiontests/introspection/tests.py
8
6625
from __future__ import absolute_import,unicode_literals from functools import update_wrapper from django.db import connection from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature from .models import Reporter, Article # # The introspection module is optional, so methods tested here might raise # NotImplementedError. This is perfectly acceptable behavior for the backend # in question, but the tests need to handle this without failing. Ideally we'd # skip these tests, but until #4788 is done we'll just ignore them. # # The easiest way to accomplish this is to decorate every test case with a # wrapper that ignores the exception. # # The metaclass is just for fun. # def ignore_not_implemented(func): def _inner(*args, **kwargs): try: return func(*args, **kwargs) except NotImplementedError: return None update_wrapper(_inner, func) return _inner class IgnoreNotimplementedError(type): def __new__(cls, name, bases, attrs): for k,v in attrs.items(): if k.startswith('test'): attrs[k] = ignore_not_implemented(v) return type.__new__(cls, name, bases, attrs) class IntrospectionTests(TestCase): __metaclass__ = IgnoreNotimplementedError def test_table_names(self): tl = connection.introspection.table_names() self.assertEqual(tl, sorted(tl)) self.assertTrue(Reporter._meta.db_table in tl, "'%s' isn't in table_list()." % Reporter._meta.db_table) self.assertTrue(Article._meta.db_table in tl, "'%s' isn't in table_list()." 
% Article._meta.db_table) def test_django_table_names(self): cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names() cursor.execute("DROP TABLE django_ixn_test_table;") self.assertTrue('django_ixn_testcase_table' not in tl, "django_table_names() returned a non-Django table") def test_django_table_names_retval_type(self): # Ticket #15216 cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names(only_existing=True) self.assertIs(type(tl), list) tl = connection.introspection.django_table_names(only_existing=False) self.assertIs(type(tl), list) def test_installed_models(self): tables = [Article._meta.db_table, Reporter._meta.db_table] models = connection.introspection.installed_models(tables) self.assertEqual(models, set([Article, Reporter])) def test_sequence_list(self): sequences = connection.introspection.sequence_list() expected = {'table': Reporter._meta.db_table, 'column': 'id'} self.assertTrue(expected in sequences, 'Reporter sequence not found in sequence_list()') def test_get_table_description_names(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual([r[0] for r in desc], [f.column for f in Reporter._meta.fields]) def test_get_table_description_types(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [datatype(r[1], r) for r in desc], ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField'] ) # Oracle forces null=True under the hood in some cases (see # https://docs.djangoproject.com/en/dev/ref/databases/#null-and-empty-strings) # so its idea about null_ok in cursor.description is different from ours. 
@skipIfDBFeature('interprets_empty_strings_as_nulls') def test_get_table_description_nullable(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[6] for r in desc], [False, False, False, False, True] ) # Regression test for #9991 - 'real' types in postgres @skipUnlessDBFeature('has_real_datatype') def test_postgresql_real_type(self): cursor = connection.cursor() cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table') cursor.execute('DROP TABLE django_ixn_real_test_table;') self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField') def test_get_relations(self): cursor = connection.cursor() relations = connection.introspection.get_relations(cursor, Article._meta.db_table) # Older versions of MySQL don't have the chops to report on this stuff, # so just skip it if no relations come back. If they do, though, we # should test that the response is correct. if relations: # That's {field_index: (field_index_other_table, other_table)} self.assertEqual(relations, {3: (0, Reporter._meta.db_table)}) def test_get_key_columns(self): cursor = connection.cursor() key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table) self.assertEqual(key_columns, [('reporter_id', Reporter._meta.db_table, 'id')]) def test_get_primary_key_column(self): cursor = connection.cursor() primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table) self.assertEqual(primary_key_column, 'id') def test_get_indexes(self): cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table) self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False}) def test_get_indexes_multicol(self): """ Test that multicolumn indexes are not included in the introspection results. 
""" cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table) self.assertNotIn('first_name', indexes) self.assertIn('id', indexes) def datatype(dbtype, description): """Helper to convert a data type into a string.""" dt = connection.introspection.get_field_type(dbtype, description) if type(dt) is tuple: return dt[0] else: return dt
bsd-3-clause
Michaelmwirigi/lynsays
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/asm.py
292
13314
# -*- coding: utf-8 -*- """ pygments.lexers.asm ~~~~~~~~~~~~~~~~~~~ Lexers for assembly languages. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer from pygments.lexers.compiled import DLexer, CppLexer, CLexer from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ Other, Keyword, Operator __all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer', 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'Ca65Lexer'] class GasLexer(RegexLexer): """ For Gas (AT&T) assembly code. """ name = 'GAS' aliases = ['gas', 'asm'] filenames = ['*.s', '*.S'] mimetypes = ['text/x-gas'] #: optional Comment or Whitespace string = r'"(\\"|[^"])*"' char = r'[a-zA-Z$._0-9@-]' identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)' number = r'(?:0[xX][a-zA-Z0-9]+|\d+)' tokens = { 'root': [ include('whitespace'), (identifier + ':', Name.Label), (r'\.' 
+ identifier, Name.Attribute, 'directive-args'), (r'lock|rep(n?z)?|data\d+', Name.Attribute), (identifier, Name.Function, 'instruction-args'), (r'[\r\n]+', Text) ], 'directive-args': [ (identifier, Name.Constant), (string, String), ('@' + identifier, Name.Attribute), (number, Number.Integer), (r'[\r\n]+', Text, '#pop'), (r'#.*?$', Comment, '#pop'), include('punctuation'), include('whitespace') ], 'instruction-args': [ # For objdump-disassembled code, shouldn't occur in # actual assembler input ('([a-z0-9]+)( )(<)('+identifier+')(>)', bygroups(Number.Hex, Text, Punctuation, Name.Constant, Punctuation)), ('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)', bygroups(Number.Hex, Text, Punctuation, Name.Constant, Punctuation, Number.Integer, Punctuation)), # Address constants (identifier, Name.Constant), (number, Number.Integer), # Registers ('%' + identifier, Name.Variable), # Numeric constants ('$'+number, Number.Integer), (r"$'(.|\\')'", String.Char), (r'[\r\n]+', Text, '#pop'), (r'#.*?$', Comment, '#pop'), include('punctuation'), include('whitespace') ], 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'#.*?\n', Comment) ], 'punctuation': [ (r'[-*,.():]+', Punctuation) ] } def analyse_text(text): if re.match(r'^\.(text|data|section)', text, re.M): return True elif re.match(r'^\.\w+', text, re.M): return 0.1 class ObjdumpLexer(RegexLexer): """ For the output of 'objdump -dr' """ name = 'objdump' aliases = ['objdump'] filenames = ['*.objdump'] mimetypes = ['text/x-objdump'] hex = r'[0-9A-Za-z]' tokens = { 'root': [ # File name & format: ('(.*?)(:)( +file format )(.*?)$', bygroups(Name.Label, Punctuation, Text, String)), # Section header ('(Disassembly of section )(.*?)(:)$', bygroups(Text, Name.Label, Punctuation)), # Function labels # (With offset) ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$', bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation, Number.Hex, Punctuation)), # (Without offset) ('('+hex+'+)( )(<)(.*?)(>:)$', 
bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation)), # Code line with disassembled instructions ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, using(GasLexer))), # Code line with ascii ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, String)), # Continued code line, only raw opcodes without disassembled # instruction ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$', bygroups(Text, Name.Label, Text, Number.Hex)), # Skipped a few bytes (r'\t\.\.\.$', Text), # Relocation line # (With offset) (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant, Punctuation, Number.Hex)), # (Without offset) (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant)), (r'[^\n]+\n', Other) ] } class DObjdumpLexer(DelegatingLexer): """ For the output of 'objdump -Sr on compiled D files' """ name = 'd-objdump' aliases = ['d-objdump'] filenames = ['*.d-objdump'] mimetypes = ['text/x-d-objdump'] def __init__(self, **options): super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options) class CppObjdumpLexer(DelegatingLexer): """ For the output of 'objdump -Sr on compiled C++ files' """ name = 'cpp-objdump' aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump'] filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'] mimetypes = ['text/x-cpp-objdump'] def __init__(self, **options): super(CppObjdumpLexer, self).__init__(CppLexer, ObjdumpLexer, **options) class CObjdumpLexer(DelegatingLexer): """ For the output of 'objdump -Sr on compiled C files' """ name = 'c-objdump' aliases = ['c-objdump'] filenames = ['*.c-objdump'] mimetypes = ['text/x-c-objdump'] def __init__(self, **options): super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options) class LlvmLexer(RegexLexer): """ For LLVM assembly code. 
""" name = 'LLVM' aliases = ['llvm'] filenames = ['*.ll'] mimetypes = ['text/x-llvm'] #: optional Comment or Whitespace string = r'"[^"]*?"' identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')' tokens = { 'root': [ include('whitespace'), # Before keywords, because keywords are valid label names :(... (identifier + '\s*:', Name.Label), include('keyword'), (r'%' + identifier, Name.Variable),#Name.Identifier.Local), (r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global), (r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous), (r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous), (r'!' + identifier, Name.Variable), (r'!\d+', Name.Variable.Anonymous), (r'c?' + string, String), (r'0[xX][a-fA-F0-9]+', Number), (r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number), (r'[=<>{}\[\]()*.,!]|x\b', Punctuation) ], 'whitespace': [ (r'(\n|\s)+', Text), (r';.*?\n', Comment) ], 'keyword': [ # Regular keywords (r'(begin|end' r'|true|false' r'|declare|define' r'|global|constant' r'|private|linker_private|internal|available_externally|linkonce' r'|linkonce_odr|weak|weak_odr|appending|dllimport|dllexport' r'|common|default|hidden|protected|extern_weak|external' r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple' r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds' r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg' r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc' r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel' r'|cc|c' r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture' r'|byval|nest|readnone|readonly' r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone' r'|noimplicitfloat|naked' r'|type|opaque' r'|eq|ne|slt|sgt|sle' r'|sge|ult|ugt|ule|uge' r'|oeq|one|olt|ogt|ole' r'|oge|ord|uno|ueq|une' r'|x' # instructions r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl' r'|lshr|ashr|and|or|xor|icmp|fcmp' 
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui' r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch' r'|invoke|unwind|unreachable' r'|malloc|alloca|free|load|store|getelementptr' r'|extractelement|insertelement|shufflevector|getresult' r'|extractvalue|insertvalue' r')\b', Keyword), # Types (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata', Keyword.Type), # Integer types (r'i[1-9]\d*', Keyword) ] } class NasmLexer(RegexLexer): """ For Nasm (Intel) assembly code. """ name = 'NASM' aliases = ['nasm'] filenames = ['*.asm', '*.ASM'] mimetypes = ['text/x-nasm'] identifier = r'[a-zA-Z$._?][a-zA-Z0-9$._?#@~]*' hexn = r'(?:0[xX][0-9a-fA-F]+|$0[0-9a-fA-F]*|[0-9]+[0-9a-fA-F]*h)' octn = r'[0-7]+q' binn = r'[01]+b' decn = r'[0-9]+' floatn = decn + r'\.e?' + decn string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`" declkw = r'(?:res|d)[bwdqt]|times' register = (r'r[0-9][0-5]?[bwd]|' r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|' r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]') wordop = r'seg|wrt|strict' type = r'byte|[dq]?word' directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|' r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|' r'EXPORT|LIBRARY|MODULE') flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ include('whitespace'), (r'^\s*%', Comment.Preproc, 'preproc'), (identifier + ':', Name.Label), (r'(%s)(\s+)(equ)' % identifier, bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration), 'instruction-args'), (directives, Keyword, 'instruction-args'), (declkw, Keyword.Declaration, 'instruction-args'), (identifier, Name.Function, 'instruction-args'), (r'[\r\n]+', Text) ], 'instruction-args': [ (string, String), (hexn, Number.Hex), (octn, Number.Oct), (binn, Number), (floatn, Number.Float), (decn, Number.Integer), include('punctuation'), (register, Name.Builtin), (identifier, Name.Variable), (r'[\r\n]+', Text, '#pop'), include('whitespace') ], 'preproc': [ (r'[^;\n]+', 
Comment.Preproc), (r';.*?\n', Comment.Single, '#pop'), (r'\n', Comment.Preproc, '#pop'), ], 'whitespace': [ (r'\n', Text), (r'[ \t]+', Text), (r';.*', Comment.Single) ], 'punctuation': [ (r'[,():\[\]]+', Punctuation), (r'[&|^<>+*/%~-]+', Operator), (r'[$]+', Keyword.Constant), (wordop, Operator.Word), (type, Keyword.Type) ], } class Ca65Lexer(RegexLexer): """ For ca65 assembler sources. *New in Pygments 1.6.* """ name = 'ca65' aliases = ['ca65'] filenames = ['*.s'] flags = re.IGNORECASE tokens = { 'root': [ (r';.*', Comment.Single), (r'\s+', Text), (r'[a-z_.@$][\w.@$]*:', Name.Label), (r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]' r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs' r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor' r'|bit)\b', Keyword), (r'\.[a-z0-9_]+', Keyword.Pseudo), (r'[-+~*/^&|!<>=]', Operator), (r'"[^"\n]*.', String), (r"'[^'\n]*.", String.Char), (r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex), (r'\d+|%[01]+', Number.Integer), (r'[#,.:()=]', Punctuation), (r'[a-z_.@$][\w.@$]*', Name), ] } def analyse_text(self, text): # comments in GAS start with "#" if re.match(r'^\s*;', text, re.MULTILINE): return 0.9
mit
iut-ibk/DynaMind-UrbanSim
3rdparty/opus/src/urbansim_parcel/zone/population_per_acre.py
2
2455
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE

from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label


class population_per_acre(Variable):
    """population in a zone / acres in a zone"""
    _return_type = "float32"

    def dependencies(self):
        # Both aggregates are expressed per zone; acres converts from sqft.
        return [
            "urbansim_parcel.household.zone_id",
            "population = zone.aggregate(household.persons)",
            "acres = zone.aggregate(parcel.parcel_sqft) / 43560.0 ",
        ]

    def compute(self, dataset_pool):
        zones = self.get_dataset()
        return zones.get_attribute('population') / zones.get_attribute('acres')

    def post_check(self, values, dataset_pool=None):
        # Density can never be negative nor exceed the total population.
        size = dataset_pool.get_dataset("parcel").get_attribute("population").sum()
        self.do_check("x >= 0 and x <= " + str(size), values)


from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from numpy import array
from opus_core.tests.utils.variable_tester import VariableTester


class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        tester = VariableTester(
            __file__,
            package_order=['urbansim_parcel', 'urbansim'],
            test_data={
                "household": {
                    "household_id": array([1, 2, 3, 4, 5, 6, 7, 8]),
                    "building_id": array([1, 2, 2, 2, 3, 3, 4, 5]),
                    "persons": array([1, 2, 2, 2, 3, 3, 1, 5]),
                },
                "building": {
                    "building_id": array([1, 2, 3, 4, 5]),
                    "parcel_id": array([1, 1, 2, 3, 4]),
                },
                "parcel": {
                    "parcel_id": array([1, 2, 3, 4]),
                    "zone_id": array([1, 3, 2, 2]),
                    "parcel_sqft": array([0.1, 0.2, 0.4, 0.3]) * 43560.0,
                },
                "zone": {
                    "zone_id": array([1, 2, 3]),
                },
            }
        )
        should_be = array([7 / 0.1, 6 / (0.3 + 0.4), 6 / 0.2])
        tester.test_is_close_for_variable_defined_by_this_module(self, should_be)


if __name__ == '__main__':
    opus_unittest.main()
gpl-2.0
rsethur/ThinkStats2
code/thinkstats2_test.py
66
12723
"""This file contains code for use with "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division import unittest import random from collections import Counter import numpy as np import thinkstats2 import thinkplot class Test(unittest.TestCase): def testOdds(self): p = 0.75 o = thinkstats2.Odds(p) self.assertEqual(o, 3) p = thinkstats2.Probability(o) self.assertEqual(p, 0.75) p = thinkstats2.Probability2(3, 1) self.assertEqual(p, 0.75) def testMean(self): t = [1, 1, 1, 3, 3, 591] mean = thinkstats2.Mean(t) self.assertEqual(mean, 100) def testVar(self): t = [1, 1, 1, 3, 3, 591] mean = thinkstats2.Mean(t) var1 = thinkstats2.Var(t) var2 = thinkstats2.Var(t, mean) self.assertAlmostEqual(mean, 100.0) self.assertAlmostEqual(var1, 48217.0) self.assertAlmostEqual(var2, 48217.0) def testMeanVar(self): t = [1, 1, 1, 3, 3, 591] mean, var = thinkstats2.MeanVar(t) self.assertAlmostEqual(mean, 100.0) self.assertAlmostEqual(var, 48217.0) def testBinomialCoef(self): res = thinkstats2.BinomialCoef(10, 3) self.assertEqual(round(res), 120) res = thinkstats2.BinomialCoef(100, 4) self.assertEqual(round(res), 3921225) def testInterpolator(self): xs = [1, 2, 3] ys = [4, 5, 6] interp = thinkstats2.Interpolator(xs, ys) y = interp.Lookup(1) self.assertAlmostEqual(y, 4) y = interp.Lookup(2) self.assertAlmostEqual(y, 5) y = interp.Lookup(3) self.assertAlmostEqual(y, 6) y = interp.Lookup(1.5) self.assertAlmostEqual(y, 4.5) y = interp.Lookup(2.75) self.assertAlmostEqual(y, 5.75) x = interp.Reverse(4) self.assertAlmostEqual(x, 1) x = interp.Reverse(6) self.assertAlmostEqual(x, 3) x = interp.Reverse(4.5) self.assertAlmostEqual(x, 1.5) x = interp.Reverse(5.75) self.assertAlmostEqual(x, 2.75) def testTrim(self): t = list(range(100)) random.shuffle(t) trimmed = thinkstats2.Trim(t, p=0.05) n = len(trimmed) self.assertEqual(n, 90) def testHist(self): hist 
= thinkstats2.Hist('allen') self.assertEqual(len(str(hist)), 38) self.assertEqual(len(hist), 4) self.assertEqual(hist.Freq('l'), 2) hist = thinkstats2.Hist(Counter('allen')) self.assertEqual(len(hist), 4) self.assertEqual(hist.Freq('l'), 2) hist2 = thinkstats2.Hist('nella') self.assertEqual(hist, hist2) def testPmf(self): pmf = thinkstats2.Pmf('allen') # this one might not be a robust test self.assertEqual(len(str(pmf)), 45) self.assertEqual(len(pmf), 4) self.assertEqual(pmf.Prob('l'), 0.4) self.assertEqual(pmf['l'], 0.4) self.assertEqual(pmf.Percentile(50), 'l') pmf = thinkstats2.Pmf(Counter('allen')) self.assertEqual(len(pmf), 4) self.assertEqual(pmf.Prob('l'), 0.4) pmf = thinkstats2.Pmf(pmf) self.assertEqual(len(pmf), 4) self.assertEqual(pmf.Prob('l'), 0.4) pmf2 = pmf.Copy() self.assertEqual(pmf, pmf2) xs, ys = pmf.Render() self.assertEqual(tuple(xs), tuple(sorted(pmf.Values()))) def testPmfAddSub(self): pmf = thinkstats2.Pmf([1, 2, 3, 4, 5, 6]) pmf1 = pmf + 1 self.assertAlmostEqual(pmf1.Mean(), 4.5) pmf2 = pmf + pmf self.assertAlmostEqual(pmf2.Mean(), 7.0) pmf3 = pmf - 1 self.assertAlmostEqual(pmf3.Mean(), 2.5) pmf4 = pmf - pmf self.assertAlmostEqual(pmf4.Mean(), 0) def testPmfMulDiv(self): pmf = thinkstats2.Pmf([1, 2, 3, 4, 5, 6]) pmf1 = pmf * 2 self.assertAlmostEqual(pmf1.Mean(), 7) pmf2 = pmf * pmf self.assertAlmostEqual(pmf2.Mean(), 12.25) pmf3 = pmf / 2 self.assertAlmostEqual(pmf3.Mean(), 1.75) pmf4 = pmf / pmf self.assertAlmostEqual(pmf4.Mean(), 1.4291667) def testPmfProbLess(self): d6 = thinkstats2.Pmf(range(1,7)) self.assertEqual(d6.ProbLess(4), 0.5) self.assertEqual(d6.ProbGreater(3), 0.5) two = d6 + d6 three = two + d6 self.assertAlmostEqual(two > three, 0.15200617284) self.assertAlmostEqual(two < three, 0.778549382716049) self.assertAlmostEqual(two.ProbGreater(three), 0.15200617284) self.assertAlmostEqual(two.ProbLess(three), 0.778549382716049) def testPmfMax(self): d6 = thinkstats2.Pmf(range(1,7)) two = d6 + d6 three = two + d6 cdf = three.Max(6) 
thinkplot.Cdf(cdf) self.assertAlmostEqual(cdf[14], 0.558230962626) def testCdf(self): t = [1, 2, 2, 3, 5] pmf = thinkstats2.Pmf(t) hist = thinkstats2.Hist(t) cdf = thinkstats2.Cdf(pmf) self.assertEqual(len(str(cdf)), 37) self.assertEqual(cdf[0], 0) self.assertAlmostEqual(cdf[1], 0.2) self.assertAlmostEqual(cdf[2], 0.6) self.assertAlmostEqual(cdf[3], 0.8) self.assertAlmostEqual(cdf[4], 0.8) self.assertAlmostEqual(cdf[5], 1) self.assertAlmostEqual(cdf[6], 1) xs = range(7) ps = cdf.Probs(xs) for p1, p2 in zip(ps, [0, 0.2, 0.6, 0.8, 0.8, 1, 1]): self.assertAlmostEqual(p1, p2) self.assertEqual(cdf.Value(0), 1) self.assertEqual(cdf.Value(0.1), 1) self.assertEqual(cdf.Value(0.2), 1) self.assertEqual(cdf.Value(0.3), 2) self.assertEqual(cdf.Value(0.4), 2) self.assertEqual(cdf.Value(0.5), 2) self.assertEqual(cdf.Value(0.6), 2) self.assertEqual(cdf.Value(0.7), 3) self.assertEqual(cdf.Value(0.8), 3) self.assertEqual(cdf.Value(0.9), 5) self.assertEqual(cdf.Value(1), 5) ps = np.linspace(0, 1, 11) xs = cdf.ValueArray(ps) self.assertTrue((xs == [1, 1, 1, 2, 2, 2, 2, 3, 3, 5, 5]).all()) np.random.seed(17) xs = cdf.Sample(7) self.assertListEqual(xs.tolist(), [2, 2, 1, 1, 3, 3, 3]) # when you make a Cdf from a Pdf, you might get some floating # point representation error self.assertEqual(len(cdf), 4) self.assertAlmostEqual(cdf.Prob(2), 0.6) self.assertAlmostEqual(cdf[2], 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.MakeCdfFromPmf(pmf) self.assertEqual(len(cdf), 4) self.assertAlmostEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.MakeCdfFromItems(pmf.Items()) self.assertEqual(len(cdf), 4) self.assertAlmostEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.Cdf(pmf.d) self.assertEqual(len(cdf), 4) self.assertAlmostEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.MakeCdfFromDict(pmf.d) self.assertEqual(len(cdf), 4) self.assertAlmostEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) 
cdf = thinkstats2.Cdf(hist) self.assertEqual(len(cdf), 4) self.assertEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.MakeCdfFromHist(hist) self.assertEqual(len(cdf), 4) self.assertEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.Cdf(t) self.assertEqual(len(cdf), 4) self.assertEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.MakeCdfFromList(t) self.assertEqual(len(cdf), 4) self.assertEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf = thinkstats2.Cdf(Counter(t)) self.assertEqual(len(cdf), 4) self.assertEqual(cdf.Prob(2), 0.6) self.assertEqual(cdf.Value(0.6), 2) cdf2 = cdf.Copy() self.assertEqual(cdf2.Prob(2), 0.6) self.assertEqual(cdf2.Value(0.6), 2) def testShift(self): t = [1, 2, 2, 3, 5] cdf = thinkstats2.Cdf(t) cdf2 = cdf.Shift(1) self.assertEqual(cdf[1], cdf2[2]) def testScale(self): t = [1, 2, 2, 3, 5] cdf = thinkstats2.Cdf(t) cdf2 = cdf.Scale(2) self.assertEqual(cdf[2], cdf2[4]) def testCdfRender(self): t = [1, 2, 2, 3, 5] cdf = thinkstats2.Cdf(t) xs, ps = cdf.Render() self.assertEqual(xs[0], 1) self.assertEqual(ps[2], 0.2) self.assertEqual(sum(xs), 22) self.assertEqual(sum(ps), 4.2) def testPmfFromCdf(self): t = [1, 2, 2, 3, 5] pmf = thinkstats2.Pmf(t) cdf = thinkstats2.Cdf(pmf) pmf2 = thinkstats2.Pmf(cdf) for x in pmf.Values(): self.assertAlmostEqual(pmf[x], pmf2[x]) pmf3 = cdf.MakePmf() for x in pmf.Values(): self.assertAlmostEqual(pmf[x], pmf3[x]) def testNormalPdf(self): pdf = thinkstats2.NormalPdf(mu=1, sigma=2) self.assertEqual(len(str(pdf)), 29) self.assertAlmostEqual(pdf.Density(3), 0.12098536226) pmf = pdf.MakePmf() self.assertAlmostEqual(pmf[1.0], 0.0239951295619) xs, ps = pdf.Render() self.assertEqual(xs[0], -5.0) self.assertAlmostEqual(ps[0], 0.0022159242059690038) pmf = thinkstats2.Pmf(pdf) self.assertAlmostEqual(pmf[1.0], 0.0239951295619) xs, ps = pmf.Render() self.assertEqual(xs[0], -5.0) self.assertAlmostEqual(ps[0], 0.00026656181123) cdf = 
thinkstats2.Cdf(pdf) self.assertAlmostEqual(cdf[1.0], 0.51199756478094904) xs, ps = cdf.Render() self.assertEqual(xs[0], -5.0) self.assertAlmostEqual(ps[0], 0.0) def testExponentialPdf(self): pdf = thinkstats2.ExponentialPdf(lam=0.5) self.assertEqual(len(str(pdf)), 24) self.assertAlmostEqual(pdf.Density(3), 0.11156508007421491) pmf = pdf.MakePmf() self.assertAlmostEqual(pmf[1.0], 0.02977166586593202) xs, ps = pdf.Render() self.assertEqual(xs[0], 0.0) self.assertAlmostEqual(ps[0], 0.5) def testEstimatedPdf(self): pdf = thinkstats2.EstimatedPdf([1, 2, 2, 3, 5]) self.assertEqual(len(str(pdf)), 30) self.assertAlmostEqual(pdf.Density(3)[0], 0.19629968) pmf = pdf.MakePmf() self.assertAlmostEqual(pmf[1.0], 0.010172282816895044) pmf = pdf.MakePmf(low=0, high=6) self.assertAlmostEqual(pmf[0.0], 0.0050742294053582942) def testEvalNormalCdf(self): p = thinkstats2.EvalNormalCdf(0) self.assertAlmostEqual(p, 0.5) p = thinkstats2.EvalNormalCdf(2, 2, 3) self.assertAlmostEqual(p, 0.5) p = thinkstats2.EvalNormalCdf(1000, 0, 1) self.assertAlmostEqual(p, 1.0) p = thinkstats2.EvalNormalCdf(-1000, 0, 1) self.assertAlmostEqual(p, 0.0) x = thinkstats2.EvalNormalCdfInverse(0.95, 0, 1) self.assertAlmostEqual(x, 1.64485362695) x = thinkstats2.EvalNormalCdfInverse(0.05, 0, 1) self.assertAlmostEqual(x, -1.64485362695) def testEvalPoissonPmf(self): p = thinkstats2.EvalPoissonPmf(2, 1) self.assertAlmostEqual(p, 0.1839397205) def testCov(self): t = [0, 4, 7, 3, 8, 1, 6, 2, 9, 5] a = np.array(t) t2 = [5, 4, 3, 0, 8, 9, 7, 6, 2, 1] self.assertAlmostEqual(thinkstats2.Cov(t, a), 8.25) self.assertAlmostEqual(thinkstats2.Cov(t, -a), -8.25) self.assertAlmostEqual(thinkstats2.Corr(t, a), 1) self.assertAlmostEqual(thinkstats2.Corr(t, -a), -1) self.assertAlmostEqual(thinkstats2.Corr(t, t2), -0.1878787878) self.assertAlmostEqual(thinkstats2.SpearmanCorr(t, -a), -1) self.assertAlmostEqual(thinkstats2.SpearmanCorr(t, t2), -0.1878787878) def testReadStataDct(self): dct = 
thinkstats2.ReadStataDct('2002FemPreg.dct') self.assertEqual(len(dct.variables), 243) self.assertEqual(len(dct.colspecs), 243) self.assertEqual(len(dct.names), 243) self.assertEqual(dct.colspecs[-1][1], -1) if __name__ == "__main__": unittest.main()
gpl-3.0
JVillella/tensorflow
tensorflow/python/util/decorator_utils_test.py
139
4197
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""decorator_utils tests."""

# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils


def _test_function(unused_arg=0):
  # Minimal free function used as a fixture for name/validation tests below.
  pass


class GetQualifiedNameTest(test.TestCase):
  """Checks decorator_utils.get_qualified_name for methods and functions."""

  def test_method(self):
    # Bound/unbound methods are reported as "ClassName.method_name".
    self.assertEqual(
        "GetQualifiedNameTest.test_method",
        decorator_utils.get_qualified_name(GetQualifiedNameTest.test_method))

  def test_function(self):
    # Plain functions are reported by their bare name.
    self.assertEqual("_test_function",
                     decorator_utils.get_qualified_name(_test_function))


class AddNoticeToDocstringTest(test.TestCase):
  """Checks docstring rewriting done by add_notice_to_docstring."""

  def _check(self, doc, expected):
    # Helper: run add_notice_to_docstring with fixed notice parameters and
    # compare against the expected rewritten docstring.
    self.assertEqual(
        decorator_utils.add_notice_to_docstring(
            doc=doc,
            instructions="Instructions",
            no_doc_str="Nothing here",
            suffix_str="(suffix)",
            notice=["Go away"]),
        expected)

  def test_regular(self):
    expected = ("Brief (suffix)\n\nGo away\nInstructions\n\nDocstring\n\n"
                "Args:\n  arg1: desc")
    # NOTE(review): the exact indentation inside these literals matters —
    # each variant exercises a different indentation normalization path.
    # No indent for main docstring
    self._check("Brief\n\nDocstring\n\nArgs:\n  arg1: desc", expected)
    # 2 space indent for main docstring, blank lines not indented
    self._check("Brief\n\n  Docstring\n\n  Args:\n    arg1: desc", expected)
    # 2 space indent for main docstring, blank lines indented as well.
    self._check("Brief\n  \n  Docstring\n  \n  Args:\n    arg1: desc", expected)
    # No indent for main docstring, first line blank.
    self._check("\nBrief\n  \n  Docstring\n  \n  Args:\n    arg1: desc",
                expected)
    # 2 space indent, first line blank.
    self._check("\n  Brief\n  \n  Docstring\n  \n  Args:\n    arg1: desc",
                expected)

  def test_brief_only(self):
    # Docstrings consisting of only a brief line (with assorted trailing
    # whitespace/newlines) all normalize to the same output.
    expected = "Brief (suffix)\n\nGo away\nInstructions"
    self._check("Brief", expected)
    self._check("Brief\n", expected)
    self._check("Brief\n  ", expected)
    self._check("\nBrief\n  ", expected)
    self._check("\n  Brief\n  ", expected)

  def test_no_docstring(self):
    # Missing/empty docstrings fall back to the no_doc_str placeholder.
    expected = "Nothing here\n\nGo away\nInstructions"
    self._check(None, expected)
    self._check("", expected)

  def test_no_empty_line(self):
    # Brief line followed directly by body (no separating blank line).
    expected = "Brief (suffix)\n\nGo away\nInstructions\n\nDocstring"
    # No second line indent
    self._check("Brief\nDocstring", expected)
    # 2 space second line indent
    self._check("Brief\n  Docstring", expected)
    # No second line indent, first line blank
    self._check("\nBrief\nDocstring", expected)
    # 2 space second line indent, first line blank
    self._check("\n  Brief\n  Docstring", expected)


class ValidateCallableTest(test.TestCase):
  """Checks that validate_callable accepts callables and rejects the rest."""

  def test_function(self):
    decorator_utils.validate_callable(_test_function, "test")

  def test_method(self):
    decorator_utils.validate_callable(self.test_method, "test")

  def test_callable(self):
    # An instance of a class defining __call__ counts as callable.
    class TestClass(object):

      def __call__(self):
        pass

    decorator_utils.validate_callable(TestClass(), "test")

  def test_partial(self):
    # functools.partial objects are callable too.
    partial = functools.partial(_test_function, unused_arg=7)
    decorator_utils.validate_callable(partial, "test")

  def test_fail_non_callable(self):
    # Non-callables must raise ValueError.
    x = 0
    self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")


if __name__ == "__main__":
  test.main()
apache-2.0
henrytao-me/openerp.positionq
openerp/addons/delivery/report/__init__.py
74
1071
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import shipping # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
pniebla/test-repo-console
svn/git-1.8.3.3.tar/git-1.8.3.3/git-1.8.3.3/git_remote_helpers/git/exporter.py
46
1700
import os
import subprocess
import sys

from git_remote_helpers.util import check_call


class GitExporter(object):
    """An exporter for testgit repositories.

    The exporter simply delegates to git fast-export.
    """

    def __init__(self, repo):
        """Creates a new exporter for the specified repo."""
        self.repo = repo

    def export_repo(self, base, refs=None):
        """Exports a fast-export stream for the given directory.

        Simply delegates to git fast-export and pipes it through sed
        to make the refs show up under the prefix rather than the
        default refs/heads. This is to demonstrate how the export
        data can be stored under its own ref (using the refspec
        capability).

        If None, refs defaults to ["HEAD"].
        """
        if not refs:
            refs = ["HEAD"]

        dirname = self.repo.get_base_path(base)
        # Marks file records object ids across runs so incremental
        # exports can pick up where the previous one left off.
        path = os.path.abspath(os.path.join(dirname, 'testgit.marks'))

        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # Emit fast-import "feature" lines on stdout before the stream
        # itself; relative-marks makes the marks paths below relative
        # to the transport helper's state directory.
        print "feature relative-marks"
        if os.path.exists(os.path.join(dirname, 'git.marks')):
            print "feature import-marks=%s/git.marks" % self.repo.hash
        print "feature export-marks=%s/git.marks" % self.repo.hash
        # Flush so the feature lines precede the fast-export output,
        # which is written by the child process to the same stdout.
        sys.stdout.flush()

        args = ["git", "--git-dir=" + self.repo.gitpath, "fast-export", "--export-marks=" + path]

        # Only resume from previous marks if an earlier export wrote them.
        if os.path.exists(path):
            args.append("--import-marks=" + path)

        args.extend(refs)

        p1 = subprocess.Popen(args, stdout=subprocess.PIPE)

        # Rewrite refs/heads/* to the helper's private ref prefix;
        # '_' is used as the sed delimiter since the prefix contains '/'.
        args = ["sed", "s_refs/heads/_" + self.repo.prefix + "_g"]

        check_call(args, stdin=p1.stdout)
mit
roger-zhao/ardupilot-3.5-dev
Tools/mavproxy_modules/magcal_graph.py
108
3748
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program.  If not, see <http://www.gnu.org/licenses/>.
'''
This module shows the geodesic sections hit by the samples collected during
compass calibration, and also some status data. The objective of this module
is to provide a reference on how to interpret the field `completion_mask`
from the MAG_CAL_PROGRESS mavlink message. That information can be used in
order to guide the vehicle user during calibration.

The plot shown by this module isn't very helpful to the end user, but it
might help developers during development of internal calibration support in
ground control stations.
'''
from MAVProxy.modules.lib import mp_module, mp_util
import multiprocessing

class MagcalGraph():
    # Owns a child process that runs the wx UI so plotting never blocks the
    # MAVProxy main loop; messages are forwarded over a multiprocessing pipe.
    def __init__(self):
        self.parent_pipe, self.child_pipe = multiprocessing.Pipe()
        # Child process handle; None until start() is first called.
        self.ui_process = None
        # Per-compass history of the last two MAG_CAL_* messages
        # (see mavlink_packet for why two are kept).
        self._last_mavlink_msgs = {}

    def start(self):
        """Launch the UI process, replaying buffered messages first."""
        if self.is_active():
            return

        if self.ui_process:
            # Reap a previously stopped UI process before restarting.
            self.ui_process.join()

        # Replay the buffered messages so a freshly opened window shows
        # the current calibration state rather than starting blank.
        for l in self._last_mavlink_msgs.values():
            for m in l:
                if not m:
                    continue
                self.parent_pipe.send(m)

        self.ui_process = multiprocessing.Process(target=self.ui_task)
        self.ui_process.start()

    def stop(self):
        """Ask the UI process to close and wait for it to exit."""
        if not self.is_active():
            return
        # The UI side treats the string 'close' as a shutdown request.
        self.parent_pipe.send('close')
        self.ui_process.join()

    def ui_task(self):
        # Runs in the child process: build the wx app and hand it the
        # receiving end of the pipe.
        mp_util.child_close_fds()

        from MAVProxy.modules.lib import wx_processguard
        from MAVProxy.modules.lib.wx_loader import wx
        from lib.magcal_graph_ui import MagcalFrame

        app = wx.App(False)
        app.frame = MagcalFrame(self.child_pipe)
        app.frame.Show()
        app.MainLoop()

    def is_active(self):
        """Return True while the UI child process is running."""
        return self.ui_process is not None and self.ui_process.is_alive()

    def mavlink_packet(self, m):
        """Buffer the packet and forward it to the UI if one is open."""
        if m.compass_id not in self._last_mavlink_msgs:
            # Keep the two last messages so that, if one is the calibration
            # report message, the previous one is the last progress message.
            self._last_mavlink_msgs[m.compass_id] = [None, m]
        else:
            l = self._last_mavlink_msgs[m.compass_id]
            l[0] = l[1]
            l[1] = m

        if not self.is_active():
            return
        self.parent_pipe.send(m)

class MagcalGraphModule(mp_module.MPModule):
    # MAVProxy module wrapper: registers the command and routes packets.
    def __init__(self, mpstate):
        super(MagcalGraphModule, self).__init__(mpstate, 'magcal_graph')
        self.add_command(
            'magcal_graph',
            self.cmd_magcal_graph,
            'open a window to report magcal progress and plot geodesic ' +
            'sections hit by the collected data in real time',
        )
        self.graph = MagcalGraph()

    def cmd_magcal_graph(self, args):
        # Command handler: args are unused, the window takes no options.
        self.graph.start()

    def mavlink_packet(self, m):
        # Only calibration progress/report packets are of interest.
        if m.get_type() not in ('MAG_CAL_PROGRESS', 'MAG_CAL_REPORT'):
            return
        self.graph.mavlink_packet(m)

    def unload(self):
        self.graph.stop()

def init(mpstate):
    '''initialise module'''
    return MagcalGraphModule(mpstate)
gpl-3.0
Slater-Victoroff/scrapy
scrapy/utils/benchserver.py
130
1312
"""Twisted-based benchmark server used by scrapy's bench suite.

Every rendered page contains ``show`` links to /follow pages with random
``n`` values in [1, total], so a crawler can walk it indefinitely.
"""
import random

from six.moves.urllib.parse import urlencode

from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor


class Root(Resource):
    """Single resource serving every path with a page of random links."""

    isLeaf = True

    def getChild(self, name, request):
        # Every child path resolves back to this resource.
        return self

    def render(self, request):
        # Query params (with defaults): total = size of the id space,
        # show = number of links per page.
        total = _getarg(request, 'total', 100, int)
        show = _getarg(request, 'show', 10, int)
        nlist = [random.randint(1, total) for _ in range(show)]
        request.write("<html><head></head><body>")
        args = request.args.copy()
        for nl in nlist:
            args['n'] = nl
            argstr = urlencode(args, doseq=True)
            request.write("<a href='/follow?{0}'>follow {1}</a><br>"
                          .format(argstr, nl))
        request.write("</body></html>")
        # Body already written via request.write(); nothing left to return.
        return ''


def _getarg(request, name, default=None, coerce=str):
    """Return query argument *name* converted with *coerce*, or *default*.

    (Parameter renamed from ``type`` to avoid shadowing the builtin.)
    """
    if name in request.args:
        return coerce(request.args[name][0])
    return default


if __name__ == '__main__':
    root = Root()
    factory = Site(root)
    # BUG FIX: the original built a second, throwaway Site(root) here and
    # never used `factory`; listen with the factory that was constructed.
    httpPort = reactor.listenTCP(8998, factory)

    def _print_listening():
        httpHost = httpPort.getHost()
        print("Bench server at http://{}:{}".format(httpHost.host,
                                                    httpHost.port))

    reactor.callWhenRunning(_print_listening)
    reactor.run()
bsd-3-clause
GiorgosMethe/Soft-Robots-Novelty-Search
cppn-neat/NE/HyperNEAT/HyperNEAT_Visualizer/src/HyperNEATVisualizer.py
2
17730
from Defines import * from SubstrateRenderer import * #populationFileName = "C:/Programming/NE/HyperNEAT/out/Results/GoNoScaling1000Gens/testGoNoScaling_T2610_Euler_Run$RUN_NUMBER$.xm_best.xml.gz" #populationFileName = "C:/Programming/NE/HyperNEAT/out/Results/GoNoScaling_T2718/testGoNoScaling_T2718_Hilbert_Run$RUN_NUMBER$.xml.backup.xml.gz" populationFileName = "../../out/Results/GoScalingBasic_T2718/GoScalingBasic_T2718_Euler_Run$RUN_NUMBER$.xml.backup.xml.gz" outputDirName = "../../out/images" #outputDirName = "/Users/pawn/Programming/NE/HyperNEAT/out/images" CAMERA_SPEED = 5 class HyperNEATVisualizer(object): def __init__(self): # Number of the glut window. self.window = 0 self.substrateRenderer = None self.eyePos = (0,15,-15) self.lookAtPos = (0,0,0) self.distance = sqrt(15*15 + 15*15) self.upDirection = (0,1,0) self.translateVelocity = (0,0,0) self.lookdownAngle = 45.0*pi/180.0 self.turnAngle = pi self.lookdownVelocity = 0 self.turnVelocity = 0 self.distanceVelocity = 0 self.width = 800 self.height = 600 self.mousePos = (0,0) self.currentGeneration = 0 self.currentIndividual = 0 self.currentRun = 1 # A general OpenGL initialization function. Sets all of the initial parameters. def InitGL(self,Width, Height): # We call this right after our OpenGL window is created. 
glEnable(GL_TEXTURE_2D) glClearColor(0.0, 0.0, 0.0, 0.0) # This Will Clear The Background Color To Black glClearDepth(1.0) # Enables Clearing Of The Depth Buffer glDepthFunc(GL_LESS) # The Type Of Depth Test To Do glEnable(GL_DEPTH_TEST) # Enables Depth Testing glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading glEnable (GL_LINE_SMOOTH) glMatrixMode(GL_PROJECTION) glLoadIdentity() # Reset The Projection Matrix # Calculate The Aspect Ratio Of The Window gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0) glMatrixMode(GL_MODELVIEW) # The function called when our window is resized (which shouldn't happen if you enable fullscreen, below) def ReSizeGLScene(self,Width, Height): if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small Height = 1 self.width = Width self.height = Height glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0) #glRotatef(90,1.0,0,0) #glTranslatef(0, -15.0, 0) self.toCameraVector = Vector3(self.distance,0,0) #Adjust for lookdown angle self.toCameraVector.x = self.distance*cos(self.lookdownAngle) self.toCameraVector.y = self.distance*sin(self.lookdownAngle) #Now adjust for heading self.toCameraVector.z = self.toCameraVector.x*cos(self.turnAngle) self.toCameraVector.x = self.toCameraVector.y*sin(self.turnAngle) gluLookAt( self.lookAtPos[0]+self.toCameraVector.x, self.lookAtPos[1]+self.toCameraVector.y, self.lookAtPos[2]+self.toCameraVector.z, self.lookAtPos[0], self.lookAtPos[1], self.lookAtPos[2], self.upDirection[0], self.upDirection[1], self.upDirection[2] ) glMatrixMode(GL_MODELVIEW) def renderString(self,loc,string): glRasterPos2f(loc[0],loc[1]) for character in string: glutBitmapCharacter(GLUT_BITMAP_9_BY_15,ord(character)) # The main drawing function. 
def DrawGLScene(self): glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(45.0, float(self.width)/float(self.height), 0.1, 100.0) #glRotatef(90,1.0,0,0) #glTranslatef(0, -15.0, 0) self.toCameraVector = Vector3(self.distance,0,0) #Adjust for lookdown angle self.toCameraVector.x = self.distance*cos(self.lookdownAngle) self.toCameraVector.y = self.distance*sin(self.lookdownAngle) #Now adjust for heading self.toCameraVector.z = self.toCameraVector.x*cos(self.turnAngle) self.toCameraVector.x = self.toCameraVector.x*sin(self.turnAngle) gluLookAt( self.lookAtPos[0]+self.toCameraVector.x, self.lookAtPos[1]+self.toCameraVector.y, self.lookAtPos[2]+self.toCameraVector.z, self.lookAtPos[0], self.lookAtPos[1], self.lookAtPos[2], self.upDirection[0], self.upDirection[1], self.upDirection[2] ) glMatrixMode(GL_MODELVIEW) glClearColor(0.0,0.5,0.75,1.0) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Clear The Screen And The Depth Buffer glLoadIdentity() # Reset The View self.substrateRenderer.update() self.substrateRenderer.render() #glTranslatef(0.0,0.0,-5.0) # Move Into The Screen glMatrixMode(GL_MODELVIEW) glPushMatrix() glLoadIdentity() glMatrixMode(GL_PROJECTION) glPushMatrix() glLoadIdentity() glOrtho( 0, 640, 480, 0, -2000, 2000 ) glDisable(GL_DEPTH_TEST) glColor4f(1,1,1,1) self.renderString((60,400),'Run: '+str(self.currentRun)) self.renderString((60,430),'Generation: '+str(self.currentGeneration+1)+'/'+str(self.population.getGenerationCount())) self.renderString((60,460),'Individual: '+str(self.currentIndividual+1)+'/'+str(self.population.getIndividualCount(self.currentGeneration))) glEnable(GL_DEPTH_TEST) glMatrixMode(GL_MODELVIEW) glPopMatrix() glMatrixMode(GL_PROJECTION) glPopMatrix() # since this is double buffered, swap the buffers to display what just got drawn. 
glutSwapBuffers() def mouseMoved(self,x,y): self.mousePos = (x,y) def mouseChanged(self,button,state,x,y): if state: self.mousePressed(button, x, y) else: self.mouseReleased(button, x, y) def mousePressed(self,button,x,y): pass def mouseReleased(self,button,x,y): print 'Clicked on pixel',(x,y) for rectNodePair in self.substrateRenderer.nodeScreenRects: rect,node = rectNodePair[0],rectNodePair[1] if rect[0][0]<=x and rect[1][0]>=x and rect[0][1]<=y and rect[1][1]>=y: if node[2]>0: #Don't do anything if the user didn't click on an input node break print 'Clicked on node',node print self.substrateRenderer.hardcodedInputs.get(node,0.0),'->', print button if button == GLUT_LEFT_BUTTON: modifier = 0.5 elif button == GLUT_RIGHT_BUTTON: modifier = -0.5 else: modifier = 0.0 self.substrateRenderer.hardcodedInputs[node] = \ self.substrateRenderer.hardcodedInputs.get(node,0.0) + modifier print self.substrateRenderer.hardcodedInputs.get(node,0.0) self.substrateRenderer.networkDirty = True break pass # The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y) def keyPressed(self,*args): if args[0] == '\033': #Escape key pass elif args[0]==GLUT_KEY_UP: self.lookdownVelocity += CAMERA_SPEED elif args[0]==GLUT_KEY_DOWN: self.lookdownVelocity -= CAMERA_SPEED elif args[0]==GLUT_KEY_LEFT: self.turnVelocity -= CAMERA_SPEED elif args[0]==GLUT_KEY_RIGHT: self.turnVelocity += CAMERA_SPEED elif args[0]=='q': self.distanceVelocity -= CAMERA_SPEED*0.25 elif args[0]=='e': self.distanceVelocity += CAMERA_SPEED*0.25 else: print args # The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y) def keyReleased(self,*args): print args # If escape is pressed, kill everything. 
if args[0] == '\033': glutDestroyWindow(self.window) sys.exit() elif args[0]==GLUT_KEY_UP: self.lookdownVelocity -= CAMERA_SPEED elif args[0]==GLUT_KEY_DOWN: self.lookdownVelocity += CAMERA_SPEED elif args[0]==GLUT_KEY_LEFT: self.turnVelocity += CAMERA_SPEED elif args[0]==GLUT_KEY_RIGHT: self.turnVelocity -= CAMERA_SPEED elif args[0]=='q': self.distanceVelocity += CAMERA_SPEED*0.25 elif args[0]=='e': self.distanceVelocity -= CAMERA_SPEED*0.25 elif args[0]=='a': self.substrate.dumpWeightsFrom( outputDirName, NEAT_Vector3( self.substrateRenderer.nodeSelected[0], self.substrateRenderer.nodeSelected[1], self.substrateRenderer.nodeSelected[2] ) ) self.substrate.dumpActivationLevels(outputDirName) elif args[0]=='=': self.currentRun += 1 self.loadPopulation() self.loadIndividual() elif args[0]=='-': self.currentRun = max(0,self.currentRun-1) self.loadPopulation() self.loadIndividual() elif args[0]=='[' or args[0]=='{': if (glutGetModifiers()&GLUT_ACTIVE_SHIFT)==0: self.currentGeneration = max(0,self.currentGeneration-1) self.loadIndividual() else: self.currentGeneration = max(0,self.currentGeneration-10) self.loadIndividual() elif args[0]==']' or args[0]=='}': if (glutGetModifiers()&GLUT_ACTIVE_SHIFT)==0: self.currentGeneration = min(self.population.getGenerationCount()-1,self.currentGeneration+1) self.loadIndividual() else: self.currentGeneration = min(self.population.getGenerationCount()-1,self.currentGeneration+10) self.loadIndividual() elif args[0]==',' or args[0]=='<': if (glutGetModifiers()&GLUT_ACTIVE_SHIFT)==0: self.currentIndividual = max(0,self.currentIndividual-1) self.loadIndividual() else: self.currentIndividual = max(0,self.currentIndividual-10) self.loadIndividual() elif args[0]=='.' 
or args[0]=='>': if (glutGetModifiers()&GLUT_ACTIVE_SHIFT)==0: self.currentIndividual = min(self.population.getIndividualCount(self.currentGeneration)-1,self.currentIndividual+1) self.loadIndividual() else: self.currentIndividual = min(self.population.getIndividualCount(self.currentGeneration)-1,self.currentIndividual+10) self.loadIndividual() else: print args def loadPopulation(self): while True: try: self.population = loadFromPopulation(populationFileName.replace('$RUN_NUMBER$',('%d' % self.currentRun))) break except: self.currentRun += 1 if self.currentRun>=1000: raise Exception("Error loading XML file(s)") def loadIndividual(self): if self.substrateRenderer is not None: hardcodedInputs = self.substrateRenderer.hardcodedInputs else: hardcodedInputs = {} experimentType = getExperimentType() if experimentType == 15 \ or experimentType == 16 \ or experimentType == 21 \ or experimentType == 24: for x in xrange(0,8,2): for y in xrange(0,3): hardcodedInputs[(x+y%2,y,0)] = 0.5 for x in xrange(0,8,2): for y in xrange(5,8): hardcodedInputs[(x+y%2,y,0)] = -0.5 print 'Generation:',self.currentGeneration self.substrate.populateSubstrate(self.population.getIndividual(self.currentIndividual,self.currentGeneration)) print 'CREATING SUBSTRATE RENDERER' self.substrateRenderer = SubstrateRenderer(self.substrate,hardcodedInputs) def initNEAT(self): print "INIT NEAT1" self.loadPopulation() print "INIT NEAT2" self.substrate = LayeredSubstrate() print "INIT NEAT3" layerSizes = [(8,8),(8,8),(1,1)] layerAdjacencyList = [(0,1),(1,2)] layerIsInput = [True,False,False] layerLocations = [(0,0,0),(0,4,0),(0,8,0)] normalize = False useOldOutputNames = True self.substrate.setLayerInfoFromCurrentExperiment() print "INIT NEAT4" self.loadIndividual() print "INIT NEAT5" def update(self,value): #print 'updating' self.lookdownAngle += (self.lookdownVelocity*10.0/1000.0) self.lookdownAngle = min(pi/2,max(0,self.lookdownAngle)) self.turnAngle += (self.turnVelocity*10.0/1000.0) while 
self.turnAngle>pi: self.turnAngle -= 2*pi while self.turnAngle<-pi: self.turnAngle += 2*pi self.distance += self.distanceVelocity self.distance = max(1,self.distance) #print self.lookdownAngle #print self.mousePos self.substrateRenderer.nodeSelected = (-1,-1,-1) for rectNodePair in self.substrateRenderer.nodeScreenRects: rect,node = rectNodePair[0],rectNodePair[1] if rect[0][0]<=self.mousePos[0] and rect[1][0]>=self.mousePos[0] \ and rect[0][1]<=self.mousePos[1] and rect[1][1]>=self.mousePos[1]: self.substrateRenderer.nodeSelected = node break glutTimerFunc(10,self.update,0) def main(self): self.initNEAT() # For now we just pass glutInit one empty argument. I wasn't sure what should or could be passed in (tuple, list, ...) # Once I find out the right stuff based on reading the PyOpenGL source, I'll address this. glutInit(sys.argv) # Select type of Display mode: # Double buffer # RGBA color # Alpha components supported # Depth buffer glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH) # get a 640 x 480 window glutInitWindowSize(640, 480) # the window starts at the upper left corner of the screen glutInitWindowPosition(0, 0) # Okay, like the C version we retain the window id to use when closing, but for those of you new # to Python (like myself), remember this assignment would make the variable local and not global # if it weren't for the global declaration at the start of main. self.window = glutCreateWindow("HyperNEAT Substrate Visualizer") glutIgnoreKeyRepeat(1) # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to # set the function pointer and invoke a function to actually register the callback, otherwise it # would be very much like the C version of the code. glutDisplayFunc(self.DrawGLScene) # Uncomment this line to get full screen. # glutFullScreen() # When we are doing nothing, redraw the scene. glutIdleFunc(self.DrawGLScene) # Register the function called when our window is resized. 
glutReshapeFunc(self.ReSizeGLScene) # Register the function called when the keyboard is pressed. glutKeyboardFunc(self.keyPressed) glutSpecialFunc(self.keyPressed) # Register the function called when the keyboard is released. glutKeyboardUpFunc(self.keyReleased) glutSpecialUpFunc(self.keyReleased) glutMouseFunc(self.mouseChanged) glutMotionFunc(self.mouseMoved) glutPassiveMotionFunc(self.mouseMoved) # Initialize our window. self.InitGL(640, 480) glutTimerFunc(10,self.update,0) # Start Event Processing Engine glutMainLoop() if __name__ == "__main__": initializeHyperNEAT() print(os.getcwd()) visualizer = HyperNEATVisualizer() visualizer.main() del visualizer cleanupHyperNEAT() sys.exit(0)
mit
mattjmorrison/ReportLab
src/reportlab/graphics/samples/runall.py
20
2011
# runs all the GUIedit charts in this directory - # makes a PDF sample for eaxh existing chart type import sys import glob import string import inspect import types def moduleClasses(mod): def P(obj, m=mod.__name__, CT=types.ClassType): return (type(obj)==CT and obj.__module__==m) try: return inspect.getmembers(mod, P)[0][1] except: return None def getclass(f): return moduleClasses(__import__(f)) def run(format, VERBOSE=0): formats = string.split(format, ',') for i in range(0, len(formats)): formats[i] == string.lower(string.strip(formats[i])) allfiles = glob.glob('*.py') allfiles.sort() for fn in allfiles: f = string.split(fn, '.')[0] c = getclass(f) if c != None: print c.__name__ try: for fmt in formats: if fmt: c().save(formats=[fmt],outDir='.',fnRoot=c.__name__) if VERBOSE: print " %s.%s" % (c.__name__, fmt) except: print " COULDN'T CREATE '%s.%s'!" % (c.__name__, format) if __name__ == "__main__": if len(sys.argv) == 1: run('pdf,pict,png') else: try: if sys.argv[1] == "-h": print 'usage: runall.py [FORMAT] [-h]' print ' if format is supplied is should be one or more of pdf,gif,eps,png etc' print ' if format is missing the following formats are assumed: pdf,pict,png' print ' -h prints this message' else: t = sys.argv[1:] for f in t: run(f) except: print 'usage: runall.py [FORMAT][-h]' print ' if format is supplied is should be one or more of pdf,gif,eps,png etc' print ' if format is missing the following formats are assumed: pdf,pict,png' print ' -h prints this message' raise
bsd-3-clause
tareqalayan/ansible
lib/ansible/modules/cloud/google/gcp_compute_instance_group.py
8
14223
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_instance_group description: - Represents an Instance Group resource. Instance groups are self-managed and can contain identical or different instances. Instance groups do not use an instance template. Unlike managed instance groups, you must create and add instances to an instance group manually. short_description: Creates a GCP InstanceGroup version_added: 2.6 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: ['present', 'absent'] default: 'present' description: description: - An optional description of this resource. Provide this property when you create the resource. required: false name: description: - The name of the instance group. - The name must be 1-63 characters long, and comply with RFC1035. 
required: false named_ports: description: - Assigns a name to a port number. - 'For example: {name: "http", port: 80}.' - This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports apply to all instances in this instance group.' required: false suboptions: name: description: - The name for this named port. - The name must be 1-63 characters long, and comply with RFC1035. required: false port: description: - The port number, which can be a value between 1 and 65535. required: false network: description: - A reference to Network resource. required: false region: description: - A reference to Region resource. required: false subnetwork: description: - A reference to Subnetwork resource. required: false zone: description: - A reference to Zone resource. required: true extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: create a network gcp_compute_network: name: 'network-instancegroup' project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" scopes: - https://www.googleapis.com/auth/compute state: present register: network - name: create a instance group gcp_compute_instance_group: name: testObject named_ports: - name: ansible port: 1234 network: "{{ network }}" zone: 'us-central1-a' project: testProject auth_kind: service_account service_account_file: /tmp/auth.pem scopes: - https://www.googleapis.com/auth/compute state: present ''' RETURN = ''' creation_timestamp: description: - Creation timestamp in RFC3339 text format. returned: success type: str description: description: - An optional description of this resource. Provide this property when you create the resource. returned: success type: str id: description: - A unique identifier for this instance group. returned: success type: int name: description: - The name of the instance group. 
- The name must be 1-63 characters long, and comply with RFC1035. returned: success type: str named_ports: description: - Assigns a name to a port number. - 'For example: {name: "http", port: 80}.' - This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports apply to all instances in this instance group.' returned: success type: complex contains: name: description: - The name for this named port. - The name must be 1-63 characters long, and comply with RFC1035. returned: success type: str port: description: - The port number, which can be a value between 1 and 65535. returned: success type: int network: description: - A reference to Network resource. returned: success type: dict region: description: - A reference to Region resource. returned: success type: str subnetwork: description: - A reference to Subnetwork resource. returned: success type: dict zone: description: - A reference to Zone resource. 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json import re import time ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), description=dict(type='str'), name=dict(type='str'), named_ports=dict(type='list', elements='dict', options=dict( name=dict(type='str'), port=dict(type='int') )), network=dict(type='dict'), region=dict(type='str'), subnetwork=dict(type='dict'), zone=dict(required=True, type='str') ) ) state = module.params['state'] kind = 'compute#instanceGroup' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): fetch = update(module, self_link(module), kind, fetch) changed = True else: delete(module, self_link(module), kind, fetch) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.post(link, resource_to_request(module))) def update(module, link, kind, fetch): module.fail_json(msg="InstanceGroup cannot be edited") def delete(module, link, kind, fetch): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.delete(link)) def resource_to_request(module): request = { u'kind': 'compute#instanceGroup', u'description': module.params.get('description'), 
u'name': module.params.get('name'), u'namedPorts': InstaGroupNamedPortsArray(module.params.get('named_ports', []), module).to_request(), u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), u'region': region_selflink(module.params.get('region'), module.params), u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink') } return_vals = {} for k, v in request.items(): if v: return_vals[k] = v return return_vals def fetch_resource(module, link, kind): auth = GcpSession(module, 'compute') return return_if_object(module, auth.get(link), kind) def self_link(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}".format(**module.params) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups".format(**module.params) def return_if_object(module, response, kind): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) if result['kind'] != kind: module.fail_json(msg="Incorrect result: {kind}".format(**result)) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. 
# This is for doing comparisons with Ansible's current parameters. def response_to_hash(module, response): return { u'creationTimestamp': response.get(u'creationTimestamp'), u'description': response.get(u'description'), u'id': response.get(u'id'), u'name': response.get(u'name'), u'namedPorts': InstaGroupNamedPortsArray(response.get(u'namedPorts', []), module).from_response(), u'network': response.get(u'network'), u'region': response.get(u'region'), u'subnetwork': response.get(u'subnetwork') } def region_selflink(name, params): if name is None: return url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/[a-z1-9\-]*" if not re.match(url, name): name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name return name def async_op_url(module, extra_data=None): if extra_data is None: extra_data = {} url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" combined = extra_data.copy() combined.update(module.params) return url.format(**combined) def wait_for_operation(module, response): op_result = return_if_object(module, response, 'compute#operation') if op_result is None: return None status = navigate_hash(op_result, ['status']) wait_done = wait_for_completion(status, op_result, module) return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroup') def wait_for_completion(status, op_result, module): op_id = navigate_hash(op_result, ['name']) op_uri = async_op_url(module, {'op_id': op_id}) while status != 'DONE': raise_if_errors(op_result, ['error', 'errors'], 'message') time.sleep(1.0) if status not in ['PENDING', 'RUNNING', 'DONE']: module.fail_json(msg="Invalid result %s" % status) op_result = fetch_resource(module, op_uri, 'compute#operation') status = navigate_hash(op_result, ['status']) return op_result def raise_if_errors(response, err_path, module): errors = navigate_hash(response, err_path) if errors is not None: 
module.fail_json(msg=errors) class InstaGroupNamedPortsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({ u'name': item.get('name'), u'port': item.get('port') }) def _response_from_item(self, item): return remove_nones_from_dict({ u'name': item.get(u'name'), u'port': item.get(u'port') }) if __name__ == '__main__': main()
gpl-3.0
rohlandm/servo
tests/wpt/web-platform-tests/tools/manifest/sourcefile.py
84
10635
import os import urlparse from fnmatch import fnmatch try: from xml.etree import cElementTree as ElementTree except ImportError: from xml.etree import ElementTree import html5lib import vcs from item import Stub, ManualTest, WebdriverSpecTest, RefTest, TestharnessTest from utils import rel_path_to_url, is_blacklisted, ContextManagerStringIO, cached_property wd_pattern = "*.py" class SourceFile(object): parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"), "xhtml":ElementTree.parse, "svg":ElementTree.parse} def __init__(self, tests_root, rel_path, url_base, use_committed=False): """Object representing a file in a source tree. :param tests_root: Path to the root of the source tree :param rel_path: File path relative to tests_root :param url_base: Base URL used when converting file paths to urls :param use_committed: Work with the last committed version of the file rather than the on-disk version. """ self.tests_root = tests_root self.rel_path = rel_path self.url_base = url_base self.use_committed = use_committed self.url = rel_path_to_url(rel_path, url_base) self.path = os.path.join(tests_root, rel_path) self.dir_path, self.filename = os.path.split(self.path) self.name, self.ext = os.path.splitext(self.filename) self.type_flag = None if "-" in self.name: self.type_flag = self.name.rsplit("-", 1)[1] self.meta_flags = self.name.split(".")[1:] def __getstate__(self): # Remove computed properties if we pickle this class rv = self.__dict__.copy() if "__cached_properties__" in rv: cached_properties = rv["__cached_properties__"] for key in rv.keys(): if key in cached_properties: del rv[key] del rv["__cached_properties__"] return rv def name_prefix(self, prefix): """Check if the filename starts with a given prefix :param prefix: The prefix to check""" return self.name.startswith(prefix) def open(self): """Return a File object opened for reading the file contents, or the contents of the file when last committed, if use_comitted is true.""" if self.use_committed: 
git = vcs.get_git_func(os.path.dirname(__file__)) blob = git("show", "HEAD:%s" % self.rel_path) file_obj = ContextManagerStringIO(blob) else: file_obj = open(self.path) return file_obj @property def name_is_non_test(self): """Check if the file name matches the conditions for the file to be a non-test file""" return (os.path.isdir(self.rel_path) or self.name_prefix("MANIFEST") or self.filename.startswith(".") or is_blacklisted(self.url)) @property def name_is_stub(self): """Check if the file name matches the conditions for the file to be a stub file""" return self.name_prefix("stub-") @property def name_is_manual(self): """Check if the file name matches the conditions for the file to be a manual test file""" return self.type_flag == "manual" @property def name_is_worker(self): """Check if the file name matches the conditions for the file to be a worker js test file""" return "worker" in self.meta_flags and self.ext == ".js" @property def name_is_webdriver(self): """Check if the file name matches the conditions for the file to be a webdriver spec test file""" # wdspec tests are in subdirectories of /webdriver excluding __init__.py # files. 
rel_dir_tree = self.rel_path.split(os.path.sep) return (rel_dir_tree[0] == "webdriver" and len(rel_dir_tree) > 2 and self.filename != "__init__.py" and fnmatch(self.filename, wd_pattern)) @property def name_is_reference(self): """Check if the file name matches the conditions for the file to be a reference file (not a reftest)""" return self.type_flag in ("ref", "notref") @property def markup_type(self): """Return the type of markup contained in a file, based on its extension, or None if it doesn't contain markup""" ext = self.ext if not ext: return None if ext[0] == ".": ext = ext[1:] if ext in ["html", "htm"]: return "html" if ext in ["xhtml", "xht", "xml"]: return "xhtml" if ext == "svg": return "svg" return None @cached_property def root(self): """Return an ElementTree Element for the root node of the file if it contains markup, or None if it does not""" if not self.markup_type: return None parser = self.parsers[self.markup_type] with self.open() as f: try: tree = parser(f) except Exception: return None if hasattr(tree, "getroot"): root = tree.getroot() else: root = tree return root @cached_property def timeout_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify timeouts""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']") @cached_property def timeout(self): """The timeout of a test or reference file. 
"long" if the file has an extended timeout or None otherwise""" if not self.root: return if self.timeout_nodes: timeout_str = self.timeout_nodes[0].attrib.get("content", None) if timeout_str and timeout_str.lower() == "long": return timeout_str @cached_property def viewport_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify viewport sizes""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']") @cached_property def viewport_size(self): """The viewport size of a test or reference file""" if not self.root: return None if not self.viewport_nodes: return None return self.viewport_nodes[0].attrib.get("content", None) @cached_property def dpi_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify device pixel ratios""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']") @cached_property def dpi(self): """The device pixel ratio of a test or reference file""" if not self.root: return None if not self.dpi_nodes: return None return self.dpi_nodes[0].attrib.get("content", None) @cached_property def testharness_nodes(self): """List of ElementTree Elements corresponding to nodes representing a testharness.js script""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']") @cached_property def content_is_testharness(self): """Boolean indicating whether the file content represents a testharness.js test""" if not self.root: return None return bool(self.testharness_nodes) @cached_property def variant_nodes(self): """List of ElementTree Elements corresponding to nodes representing a test variant""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']") @cached_property def test_variants(self): rv = [] for element in self.variant_nodes: if "content" in element.attrib: variant = element.attrib["content"] assert variant == "" or variant[0] in ["#", "?"] 
rv.append(variant) if not rv: rv = [""] return rv @cached_property def reftest_nodes(self): """List of ElementTree Elements corresponding to nodes representing a to a reftest <link>""" if not self.root: return [] match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']") mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']") return match_links + mismatch_links @cached_property def references(self): """List of (ref_url, relation) tuples for any reftest references specified in the file""" rv = [] rel_map = {"match": "==", "mismatch": "!="} for item in self.reftest_nodes: if "href" in item.attrib: ref_url = urlparse.urljoin(self.url, item.attrib["href"]) ref_type = rel_map[item.attrib["rel"]] rv.append((ref_url, ref_type)) return rv @cached_property def content_is_ref_node(self): """Boolean indicating whether the file is a non-leaf node in a reftest graph (i.e. if it contains any <link rel=[mis]match>""" return bool(self.references) def manifest_items(self): """List of manifest items corresponding to the file. There is typically one per test, but in the case of reftests a node may have corresponding manifest items without being a test itself.""" if self.name_is_non_test: rv = [] elif self.name_is_stub: rv = [Stub(self, self.url)] elif self.name_is_manual: rv = [ManualTest(self, self.url)] elif self.name_is_worker: rv = [TestharnessTest(self, self.url[:-3])] elif self.name_is_webdriver: rv = [WebdriverSpecTest(self)] elif self.content_is_testharness: rv = [] for variant in self.test_variants: url = self.url + variant rv.append(TestharnessTest(self, url, timeout=self.timeout)) elif self.content_is_ref_node: rv = [RefTest(self, self.url, self.references, timeout=self.timeout, viewport_size=self.viewport_size, dpi=self.dpi)] else: # If nothing else it's a helper file, which we don't have a specific type for rv = [] return rv
mpl-2.0
damdam-s/OpenUpgrade
addons/calendar/controllers/main.py
329
3390
import simplejson import openerp import openerp.http as http from openerp.http import request import openerp.addons.web.controllers.main as webmain import json class meeting_invitation(http.Controller): @http.route('/calendar/meeting/accept', type='http', auth="calendar") def accept(self, db, token, action, id, **kwargs): registry = openerp.modules.registry.RegistryManager.get(db) attendee_pool = registry.get('calendar.attendee') with registry.cursor() as cr: attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'accepted')]) if attendee_id: attendee_pool.do_accept(cr, openerp.SUPERUSER_ID, attendee_id) return self.view(db, token, action, id, view='form') @http.route('/calendar/meeting/decline', type='http', auth="calendar") def declined(self, db, token, action, id): registry = openerp.modules.registry.RegistryManager.get(db) attendee_pool = registry.get('calendar.attendee') with registry.cursor() as cr: attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'declined')]) if attendee_id: attendee_pool.do_decline(cr, openerp.SUPERUSER_ID, attendee_id) return self.view(db, token, action, id, view='form') @http.route('/calendar/meeting/view', type='http', auth="calendar") def view(self, db, token, action, id, view='calendar'): registry = openerp.modules.registry.RegistryManager.get(db) meeting_pool = registry.get('calendar.event') attendee_pool = registry.get('calendar.attendee') partner_pool = registry.get('res.partner') with registry.cursor() as cr: attendee = attendee_pool.search_read(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)], []) if attendee and attendee[0] and attendee[0].get('partner_id'): partner_id = int(attendee[0].get('partner_id')[0]) tz = partner_pool.read(cr, openerp.SUPERUSER_ID, partner_id, ['tz'])['tz'] else: tz = False attendee_data = meeting_pool.get_attendee(cr, openerp.SUPERUSER_ID, id, dict(tz=tz)) if attendee: 
attendee_data['current_attendee'] = attendee[0] values = dict(init="s.calendar.event('%s', '%s', '%s', '%s' , '%s');" % (db, action, id, 'form', json.dumps(attendee_data))) return request.render('web.webclient_bootstrap', values) # Function used, in RPC to check every 5 minutes, if notification to do for an event or not @http.route('/calendar/notify', type='json', auth="none") def notify(self): registry = request.registry uid = request.session.uid context = request.session.context with registry.cursor() as cr: res = registry.get("calendar.alarm_manager").get_next_notif(cr, uid, context=context) return res @http.route('/calendar/notify_ack', type='json', auth="none") def notify_ack(self, type=''): registry = request.registry uid = request.session.uid context = request.session.context with registry.cursor() as cr: res = registry.get("res.partner")._set_calendar_last_notif_ack(cr, uid, context=context) return res
agpl-3.0
rsoulliere/Evergreen_Mohawk
build/i18n/scripts/marc_tooltip_maker.py
11
11459
#!/usr/bin/env python # vim: set fileencoding=utf-8 : # vim:et:ts=4:sw=4: # Copyright (C) 2008 Laurentian University # Dan Scott <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ The MARC editor offers tooltips generated from the Library of Congress Concise MARC Record documentation available online. This script generates a French version of those tooltips based on the Library and Archives Canada translation of the LoC documentation. """ from BeautifulSoup import BeautifulSoup # Simple case: # Get <a id="#mrcb(###)">: map $1 to tag attribute # From within that A event, retrieve the SMALL event # If SMALL.cdata == '\s*(R)\s*' then repeatable = yes # If SMALL.cdata == '\s*(NR)\s*' then repeatable = no # Get the next P event: map to <description> element # # Target: # <field repeatable="true" tag="006"> # <description>This field contains 18 character positions (00-17) # that provide for coding information about special aspects of # the item being cataloged that cannot be coded in field 008 # (Fixed-Length Data Elements). It is used in cases when an item # has multiple characteristics. 
It is also used to record the coded # serial aspects of nontextual continuing resources.</description> # </field> # Complex case: # field and tag and repeatable description as above # check for <h3>Indicateurs</h3> before next <h2> # check for <li>Premier indicateur or <li>Second indicateur to set indicator.position # check for <li class="sqf">(\d)\s*-\s*([^<]*)< for indicator.position.value = def__init__ion # ignore if "Non d&#233;fini" # check for <h3>Codes do sous-zones # for each <li>: # CDATA (stripped of tags, with (NR) or (R) stripped out) = field.subfield.def__init__ion # (NR) or (R) means field.subfield.repeatable = false or true # <field repeatable="true" tag="800"> # <description>An author/title series added entry in which the # author portion is a personal name.</description> # <indicator position="1" value="0"> # <description>Forename</description> # </indicator> # <indicator position="1" value="1"> # <description>Surname</description> # </indicator> # <indicator position="1" value="3"> # <description>Family name</description> # </indicator> # <subfield code="a" repeatable="false"> # <description>Personal name </description> # </subfield> # <subfield code="b" repeatable="false"> # <description>Numeration </description> # </subfield> class MarcCollection(object): """ Contains a set of descriptions of MARC fields organized by tag """ def __init__(self): self.fields = {} def add_field(self, field): """ Add a MARC field to our collection """ self.fields[field.tag] = field def to_xml(self): """ Convert the MARC field collection to XML representation """ xml = "<?xml version='1.0' encoding='utf-8'?>\n" xml += "<fields>\n" keys = self.fields.keys() keys.sort() for key in keys: xml += self.fields[key].to_xml() xml += "\n</fields>\n" return xml class MarcField(object): """ Describes the properties of a MARC field You can directly access and manipulate the indicators and subfields lists """ def __init__(self, tag, name, repeatable, description): self.tag = tag 
self.name = name self.repeatable = repeatable self.description = description self.indicators = [] self.subfields = [] def to_xml(self): """ Convert the MARC field to XML representation """ xml = u" <field repeatable='%s' tag='%s'>\n" % (self.repeatable, self.tag) xml += u" <name>%s</name>\n" % (self.name) xml += u" <description>%s</description>\n" % (self.description) for ind in self.indicators: xml += ind.to_xml() xml += '\n' for subfield in self.subfields: xml += subfield.to_xml() xml += '\n' xml += u" </field>\n" return xml class Subfield(object): """ Describes the properties of a MARC subfield """ def __init__(self, code, repeatable, description): self.code = code self.repeatable = repeatable self.description = description def to_xml(self): """ Convert the subfield to XML representation """ xml = u" <subfield code='%s' repeatable='%s'>\n" % (self.code, self.repeatable) xml += u" <description>%s</description>\n" % (self.description) xml += u" </subfield>\n" return xml class Indicator(object): """ Describes the properties of an indicator-value pair for a MARC field """ def __init__(self, position, value, description): self.position = position self.value = value self.description = description def to_xml(self): """ Convert the indicator-value pair to XML representation """ xml = u" <indicator position='%s' value='%s'>\n" % (self.position, self.value) xml += u" <description>%s</description>\n" % (self.description) xml += u" </indicator>\n" return xml def process_indicator(field, position, raw_ind): """ Given an XML chunk holding indicator data, append Indicator objects to a MARC field """ if (re.compile(r'indicateur\s*-\s*Non').search(raw_ind.contents[0])): return None if (not raw_ind.ul): print "No %d indicator for %s, although not not defined either..." 
% (position, field.tag) return None ind_values = raw_ind.ul.findAll('li') for value in ind_values: text = ''.join(value.findAll(text=True)) if (re.compile(u'non précisé').search(text)): continue matches = re.compile(r'^(\S(-\S)?)\s*-\s*(.+)$', re.S).search(text) if matches is None: continue new_ind = Indicator(position, matches.group(1).replace('\n', ' ').rstrip(), matches.group(3).replace('\n', ' ').rstrip()) field.indicators.append(new_ind) def process_subfield(field, subfield): """ Given an XML chunk holding subfield data, append a Subfield object to a MARC field """ repeatable = 'true' if (subfield.span): if (re.compile(r'\(R\)').search(subfield.span.renderContents())): repeatable = 'false' subfield.span.extract() elif (subfield.small): if (re.compile(r'\(R\)').search(subfield.small.renderContents())): repeatable = 'false' subfield.small.extract() else: print "%s has no small or span tags?" % (field.tag) subfield_text = re.compile(r'\n').sub(' ', ''.join(subfield.findAll(text=True))) matches = re.compile(r'^\$(\w)\s*-\s*(.+)$', re.S).search(subfield_text) if (not matches): print "No subfield match for field: " + field.tag return None field.subfields.append(Subfield(matches.group(1).replace('\n', ' ').rstrip(), repeatable, matches.group(2).replace('\n', ' ').rstrip())) def process_tag(tag): """ Given a chunk of XML representing a MARC field, generate a MarcField object """ repeatable = 'true' name = u'' description = u'' # Get tag tag_num = re.compile(r'^mrcb(\d+)').sub(r'\1', tag['id']) if (len(tag_num) != 3): return None # Get repeatable - most stored in <span>, some stored in <small> if (re.compile(r'\(NR\)').search(tag.renderContents())): repeatable = 'false' # Get name - stored in <h2> like: # <h2><a id="mrcb250">250 - Mention d'&#233;dition <span class="small">(NR)</span></a> name = re.compile(r'^.+?-\s*(.+)\s*\(.+$', re.S).sub(r'\1', ''.join(tag.findAll(text=True))) name = name.replace('\n', ' ').rstrip() # Get description desc = 
tag.parent.findNextSibling('p') if (not desc): print "No description for %s" % (tag_num) else: if (str(desc.__class__) == 'BeautifulSoup.Tag'): try: description += u''.join(desc.findAll(text=True)) except: print "Bad description for: " + tag_num print u' '.join(desc.findAll(text=True)) else: description += desc.string description = description.replace('\n', ' ').rstrip() # Create the tag field = MarcField(tag_num, name, repeatable, description) for desc in tag.parent.findNextSiblings(): if (str(desc.__class__) == 'BeautifulSoup.Tag'): if (desc.name == 'h2'): break elif (desc.name == 'h3' and re.compile(r'Indicateurs').search(desc.string)): # process indicators first_ind = desc.findNextSibling('ul').li second_ind = first_ind.findNextSibling('li') if (not second_ind): second_ind = first_ind.parent.findNextSibling('ul').li process_indicator(field, 1, first_ind) process_indicator(field, 2, second_ind) elif (desc.name == 'h3' and re.compile(r'Codes de sous').search(desc.string)): # Get subfields subfield = desc.findNextSibling('ul').li while (subfield): process_subfield(field, subfield) subfield = subfield.findNextSibling('li') return field if __name__ == '__main__': import codecs import copy import os import re import subprocess ALL_MY_FIELDS = MarcCollection() # Run through the LAC-BAC MARC files we care about and convert like crazy for filename in os.listdir('.'): if (not re.compile(r'^040010-1\d\d\d-f.html').search(filename)): continue print filename devnull = codecs.open('/dev/null', encoding='utf-8', mode='w') file = subprocess.Popen( ('tidy', '-asxml', '-n', '-q', '-utf8', filename), stdout=subprocess.PIPE, stderr=devnull).communicate()[0] # Strip out the hard spaces on our way through hardMassage = [(re.compile(r'&#160;'), lambda match: ' ')] myHardMassage = copy.copy(BeautifulSoup.MARKUP_MASSAGE) myHardMassage.extend(myHardMassage) filexml = BeautifulSoup(file, markupMassage=myHardMassage) tags = filexml.findAll('a', id=re.compile(r'^mrcb')) for tag in tags: 
field = process_tag(tag) if (field): ALL_MY_FIELDS.add_field(field) MARCOUT = codecs.open('marcedit-tooltips-fr.xml', encoding='utf-8', mode='w') MARCOUT.write(ALL_MY_FIELDS.to_xml().encode('UTF-8')) MARCOUT.close()
gpl-2.0
WhySoGeeky/DroidPot
venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/tests/test_resources.py
242
23622
import os import sys import tempfile import shutil import string import pytest import pkg_resources from pkg_resources import (parse_requirements, VersionConflict, parse_version, Distribution, EntryPoint, Requirement, safe_version, safe_name, WorkingSet) packaging = pkg_resources.packaging def safe_repr(obj, short=False): """ copied from Python2.7""" try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < pkg_resources._MAX_LENGTH: return result return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...' class Metadata(pkg_resources.EmptyProvider): """Mock object to return metadata as if from an on-disk distribution""" def __init__(self, *pairs): self.metadata = dict(pairs) def has_metadata(self, name): return name in self.metadata def get_metadata(self, name): return self.metadata[name] def get_metadata_lines(self, name): return pkg_resources.yield_lines(self.get_metadata(name)) dist_from_fn = pkg_resources.Distribution.from_filename class TestDistro: def testCollection(self): # empty path should produce no distributions ad = pkg_resources.Environment([], platform=None, python=None) assert list(ad) == [] assert ad['FooPkg'] == [] ad.add(dist_from_fn("FooPkg-1.3_1.egg")) ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg")) ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg")) # Name is in there now assert ad['FooPkg'] # But only 1 package assert list(ad) == ['foopkg'] # Distributions sort by version assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.3-1','1.2'] # Removing a distribution leaves sequence alone ad.remove(ad['FooPkg'][1]) assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.2'] # And inserting adds them in order ad.add(dist_from_fn("FooPkg-1.9.egg")) assert [dist.version for dist in ad['FooPkg']] == ['1.9','1.4','1.2'] ws = WorkingSet([]) foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg") foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg") req, = parse_requirements("FooPkg>=1.3") # Nominal case: no distros 
on path, should yield all applicable assert ad.best_match(req, ws).version == '1.9' # If a matching distro is already installed, should return only that ws.add(foo14) assert ad.best_match(req, ws).version == '1.4' # If the first matching distro is unsuitable, it's a version conflict ws = WorkingSet([]) ws.add(foo12) ws.add(foo14) with pytest.raises(VersionConflict): ad.best_match(req, ws) # If more than one match on the path, the first one takes precedence ws = WorkingSet([]) ws.add(foo14) ws.add(foo12) ws.add(foo14) assert ad.best_match(req, ws).version == '1.4' def checkFooPkg(self,d): assert d.project_name == "FooPkg" assert d.key == "foopkg" assert d.version == "1.3.post1" assert d.py_version == "2.4" assert d.platform == "win32" assert d.parsed_version == parse_version("1.3-1") def testDistroBasics(self): d = Distribution( "/some/path", project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" ) self.checkFooPkg(d) d = Distribution("/some/path") assert d.py_version == sys.version[:3] assert d.platform == None def testDistroParse(self): d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg") self.checkFooPkg(d) d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg-info") self.checkFooPkg(d) def testDistroMetadata(self): d = Distribution( "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", metadata = Metadata( ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") ) ) self.checkFooPkg(d) def distRequires(self, txt): return Distribution("/foo", metadata=Metadata(('depends.txt', txt))) def checkRequires(self, dist, txt, extras=()): assert list(dist.requires(extras)) == list(parse_requirements(txt)) def testDistroDependsSimple(self): for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0": self.checkRequires(self.distRequires(v), v) def testResolve(self): ad = pkg_resources.Environment([]) ws = WorkingSet([]) # Resolving no requirements -> nothing to install assert list(ws.resolve([], ad)) == [] # Request something not in the collection 
-> DistributionNotFound with pytest.raises(pkg_resources.DistributionNotFound): ws.resolve(parse_requirements("Foo"), ad) Foo = Distribution.from_filename( "/foo_dir/Foo-1.2.egg", metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0")) ) ad.add(Foo) ad.add(Distribution.from_filename("Foo-0.9.egg")) # Request thing(s) that are available -> list to activate for i in range(3): targets = list(ws.resolve(parse_requirements("Foo"), ad)) assert targets == [Foo] list(map(ws.add,targets)) with pytest.raises(VersionConflict): ws.resolve(parse_requirements("Foo==0.9"), ad) ws = WorkingSet([]) # reset # Request an extra that causes an unresolved dependency for "Baz" with pytest.raises(pkg_resources.DistributionNotFound): ws.resolve(parse_requirements("Foo[bar]"), ad) Baz = Distribution.from_filename( "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo")) ) ad.add(Baz) # Activation list now includes resolved dependency assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) ==[Foo,Baz] # Requests for conflicting versions produce VersionConflict with pytest.raises(VersionConflict) as vc: ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad) msg = 'Foo 0.9 is installed but Foo==1.2 is required' assert vc.value.report() == msg def testDistroDependsOptions(self): d = self.distRequires(""" Twisted>=1.5 [docgen] ZConfig>=2.0 docutils>=0.3 [fastcgi] fcgiapp>=0.1""") self.checkRequires(d,"Twisted>=1.5") self.checkRequires( d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] ) self.checkRequires( d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] ) self.checkRequires( d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), ["docgen","fastcgi"] ) self.checkRequires( d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), ["fastcgi", "docgen"] ) with pytest.raises(pkg_resources.UnknownExtra): d.requires(["foo"]) class TestWorkingSet: def test_find_conflicting(self): ws = WorkingSet([]) Foo = Distribution.from_filename("/foo_dir/Foo-1.2.egg") 
ws.add(Foo) # create a requirement that conflicts with Foo 1.2 req = next(parse_requirements("Foo<1.2")) with pytest.raises(VersionConflict) as vc: ws.find(req) msg = 'Foo 1.2 is installed but Foo<1.2 is required' assert vc.value.report() == msg def test_resolve_conflicts_with_prior(self): """ A ContextualVersionConflict should be raised when a requirement conflicts with a prior requirement for a different package. """ # Create installation where Foo depends on Baz 1.0 and Bar depends on # Baz 2.0. ws = WorkingSet([]) md = Metadata(('depends.txt', "Baz==1.0")) Foo = Distribution.from_filename("/foo_dir/Foo-1.0.egg", metadata=md) ws.add(Foo) md = Metadata(('depends.txt', "Baz==2.0")) Bar = Distribution.from_filename("/foo_dir/Bar-1.0.egg", metadata=md) ws.add(Bar) Baz = Distribution.from_filename("/foo_dir/Baz-1.0.egg") ws.add(Baz) Baz = Distribution.from_filename("/foo_dir/Baz-2.0.egg") ws.add(Baz) with pytest.raises(VersionConflict) as vc: ws.resolve(parse_requirements("Foo\nBar\n")) msg = "Baz 1.0 is installed but Baz==2.0 is required by {'Bar'}" if pkg_resources.PY2: msg = msg.replace("{'Bar'}", "set(['Bar'])") assert vc.value.report() == msg class TestEntryPoints: def assertfields(self, ep): assert ep.name == "foo" assert ep.module_name == "pkg_resources.tests.test_resources" assert ep.attrs == ("TestEntryPoints",) assert ep.extras == ("x",) assert ep.load() is TestEntryPoints expect = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]" assert str(ep) == expect def setup_method(self, method): self.dist = Distribution.from_filename( "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) def testBasics(self): ep = EntryPoint( "foo", "pkg_resources.tests.test_resources", ["TestEntryPoints"], ["x"], self.dist ) self.assertfields(ep) def testParse(self): s = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]" ep = EntryPoint.parse(s, self.dist) self.assertfields(ep) ep = EntryPoint.parse("bar baz= spammity[PING]") assert ep.name == 
"bar baz" assert ep.module_name == "spammity" assert ep.attrs == () assert ep.extras == ("ping",) ep = EntryPoint.parse(" fizzly = wocka:foo") assert ep.name == "fizzly" assert ep.module_name == "wocka" assert ep.attrs == ("foo",) assert ep.extras == () # plus in the name spec = "html+mako = mako.ext.pygmentplugin:MakoHtmlLexer" ep = EntryPoint.parse(spec) assert ep.name == 'html+mako' reject_specs = "foo", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2" @pytest.mark.parametrize("reject_spec", reject_specs) def test_reject_spec(self, reject_spec): with pytest.raises(ValueError): EntryPoint.parse(reject_spec) def test_printable_name(self): """ Allow any printable character in the name. """ # Create a name with all printable characters; strip the whitespace. name = string.printable.strip() spec = "{name} = module:attr".format(**locals()) ep = EntryPoint.parse(spec) assert ep.name == name def checkSubMap(self, m): assert len(m) == len(self.submap_expect) for key, ep in pkg_resources.iteritems(self.submap_expect): assert repr(m.get(key)) == repr(ep) submap_expect = dict( feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), feature3=EntryPoint('feature3', 'this.module', extras=['something']) ) submap_str = """ # define features for blah blah feature1 = somemodule:somefunction feature2 = another.module:SomeClass [extra1,extra2] feature3 = this.module [something] """ def testParseList(self): self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str)) with pytest.raises(ValueError): EntryPoint.parse_group("x a", "foo=bar") with pytest.raises(ValueError): EntryPoint.parse_group("x", ["foo=baz", "foo=bar"]) def testParseMap(self): m = EntryPoint.parse_map({'xyz':self.submap_str}) self.checkSubMap(m['xyz']) assert list(m.keys()) == ['xyz'] m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) self.checkSubMap(m['xyz']) assert list(m.keys()) == ['xyz'] with 
pytest.raises(ValueError): EntryPoint.parse_map(["[xyz]", "[xyz]"]) with pytest.raises(ValueError): EntryPoint.parse_map(self.submap_str) class TestRequirements: def testBasics(self): r = Requirement.parse("Twisted>=1.2") assert str(r) == "Twisted>=1.2" assert repr(r) == "Requirement.parse('Twisted>=1.2')" assert r == Requirement("Twisted", [('>=','1.2')], ()) assert r == Requirement("twisTed", [('>=','1.2')], ()) assert r != Requirement("Twisted", [('>=','2.0')], ()) assert r != Requirement("Zope", [('>=','1.2')], ()) assert r != Requirement("Zope", [('>=','3.0')], ()) assert r != Requirement.parse("Twisted[extras]>=1.2") def testOrdering(self): r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ()) r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ()) assert r1 == r2 assert str(r1) == str(r2) assert str(r2) == "Twisted==1.2c1,>=1.2" def testBasicContains(self): r = Requirement("Twisted", [('>=','1.2')], ()) foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg") twist11 = Distribution.from_filename("Twisted-1.1.egg") twist12 = Distribution.from_filename("Twisted-1.2.egg") assert parse_version('1.2') in r assert parse_version('1.1') not in r assert '1.2' in r assert '1.1' not in r assert foo_dist not in r assert twist11 not in r assert twist12 in r def testOptionsAndHashing(self): r1 = Requirement.parse("Twisted[foo,bar]>=1.2") r2 = Requirement.parse("Twisted[bar,FOO]>=1.2") assert r1 == r2 assert r1.extras == ("foo","bar") assert r2.extras == ("bar","foo") # extras are normalized assert hash(r1) == hash(r2) assert ( hash(r1) == hash(( "twisted", packaging.specifiers.SpecifierSet(">=1.2"), frozenset(["foo","bar"]), )) ) def testVersionEquality(self): r1 = Requirement.parse("foo==0.3a2") r2 = Requirement.parse("foo!=0.3a4") d = Distribution.from_filename assert d("foo-0.3a4.egg") not in r1 assert d("foo-0.3a1.egg") not in r1 assert d("foo-0.3a4.egg") not in r2 assert d("foo-0.3a2.egg") in r1 assert d("foo-0.3a2.egg") in r2 assert 
d("foo-0.3a3.egg") in r2 assert d("foo-0.3a5.egg") in r2 def testSetuptoolsProjectName(self): """ The setuptools project should implement the setuptools package. """ assert ( Requirement.parse('setuptools').project_name == 'setuptools') # setuptools 0.7 and higher means setuptools. assert ( Requirement.parse('setuptools == 0.7').project_name == 'setuptools') assert ( Requirement.parse('setuptools == 0.7a1').project_name == 'setuptools') assert ( Requirement.parse('setuptools >= 0.7').project_name == 'setuptools') class TestParsing: def testEmptyParse(self): assert list(parse_requirements('')) == [] def testYielding(self): for inp,out in [ ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), (['x\n\n','y'], ['x','y']), ]: assert list(pkg_resources.yield_lines(inp)) == out def testSplitting(self): sample = """ x [Y] z a [b ] # foo c [ d] [q] v """ assert ( list(pkg_resources.split_sections(sample)) == [ (None, ["x"]), ("Y", ["z", "a"]), ("b", ["c"]), ("d", []), ("q", ["v"]), ] ) with pytest.raises(ValueError): list(pkg_resources.split_sections("[foo")) def testSafeName(self): assert safe_name("adns-python") == "adns-python" assert safe_name("WSGI Utils") == "WSGI-Utils" assert safe_name("WSGI Utils") == "WSGI-Utils" assert safe_name("Money$$$Maker") == "Money-Maker" assert safe_name("peak.web") != "peak-web" def testSafeVersion(self): assert safe_version("1.2-1") == "1.2.post1" assert safe_version("1.2 alpha") == "1.2.alpha" assert safe_version("2.3.4 20050521") == "2.3.4.20050521" assert safe_version("Money$$$Maker") == "Money-Maker" assert safe_version("peak.web") == "peak.web" def testSimpleRequirements(self): assert ( list(parse_requirements('Twis-Ted>=1.2-1')) == [Requirement('Twis-Ted',[('>=','1.2-1')], ())] ) assert ( list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')) == [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())] ) assert ( Requirement.parse("FooBar==1.99a3") == Requirement("FooBar", [('==','1.99a3')], ()) ) with 
pytest.raises(ValueError): Requirement.parse(">=2.3") with pytest.raises(ValueError): Requirement.parse("x\\") with pytest.raises(ValueError): Requirement.parse("x==2 q") with pytest.raises(ValueError): Requirement.parse("X==1\nY==2") with pytest.raises(ValueError): Requirement.parse("#") def testVersionEquality(self): def c(s1,s2): p1, p2 = parse_version(s1),parse_version(s2) assert p1 == p2, (s1,s2,p1,p2) c('1.2-rc1', '1.2rc1') c('0.4', '0.4.0') c('0.4.0.0', '0.4.0') c('0.4.0-0', '0.4-0') c('0post1', '0.0post1') c('0pre1', '0.0c1') c('0.0.0preview1', '0c1') c('0.0c1', '0-rc1') c('1.2a1', '1.2.a.1') c('1.2.a', '1.2a') def testVersionOrdering(self): def c(s1,s2): p1, p2 = parse_version(s1),parse_version(s2) assert p1<p2, (s1,s2,p1,p2) c('2.1','2.1.1') c('2a1','2b0') c('2a1','2.1') c('2.3a1', '2.3') c('2.1-1', '2.1-2') c('2.1-1', '2.1.1') c('2.1', '2.1post4') c('2.1a0-20040501', '2.1') c('1.1', '02.1') c('3.2', '3.2.post0') c('3.2post1', '3.2post2') c('0.4', '4.0') c('0.0.4', '0.4.0') c('0post1', '0.4post1') c('2.1.0-rc1','2.1.0') c('2.1dev','2.1a0') torture =""" 0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1 0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2 0.77.2-1 0.77.1-1 0.77.0-1 """.split() for p,v1 in enumerate(torture): for v2 in torture[p+1:]: c(v2,v1) def testVersionBuildout(self): """ Buildout has a function in it's bootstrap.py that inspected the return value of parse_version. The new parse_version returns a Version class which needs to support this behavior, at least for now. """ def buildout(parsed_version): _final_parts = '*final-', '*final' def _final_version(parsed_version): for part in parsed_version: if (part[:1] == '*') and (part not in _final_parts): return False return True return _final_version(parsed_version) assert buildout(parse_version("1.0")) assert not buildout(parse_version("1.0a1")) def testVersionIndexable(self): """ Some projects were doing things like parse_version("v")[0], so we'll support indexing the same as we support iterating. 
""" assert parse_version("1.0")[0] == "00000001" def testVersionTupleSort(self): """ Some projects expected to be able to sort tuples against the return value of parse_version. So again we'll add a warning enabled shim to make this possible. """ assert parse_version("1.0") < tuple(parse_version("2.0")) assert parse_version("1.0") <= tuple(parse_version("2.0")) assert parse_version("1.0") == tuple(parse_version("1.0")) assert parse_version("3.0") > tuple(parse_version("2.0")) assert parse_version("3.0") >= tuple(parse_version("2.0")) assert parse_version("3.0") != tuple(parse_version("2.0")) assert not (parse_version("3.0") != tuple(parse_version("3.0"))) def testVersionHashable(self): """ Ensure that our versions stay hashable even though we've subclassed them and added some shim code to them. """ assert ( hash(parse_version("1.0")) == hash(parse_version("1.0")) ) class TestNamespaces: def setup_method(self, method): self._ns_pkgs = pkg_resources._namespace_packages.copy() self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-") os.makedirs(os.path.join(self._tmpdir, "site-pkgs")) self._prev_sys_path = sys.path[:] sys.path.append(os.path.join(self._tmpdir, "site-pkgs")) def teardown_method(self, method): shutil.rmtree(self._tmpdir) pkg_resources._namespace_packages = self._ns_pkgs.copy() sys.path = self._prev_sys_path[:] @pytest.mark.skipif(os.path.islink(tempfile.gettempdir()), reason="Test fails when /tmp is a symlink. 
See #231") def test_two_levels_deep(self): """ Test nested namespace packages Create namespace packages in the following tree : site-packages-1/pkg1/pkg2 site-packages-2/pkg1/pkg2 Check both are in the _namespace_packages dict and that their __path__ is correct """ sys.path.append(os.path.join(self._tmpdir, "site-pkgs2")) os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2")) os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")) ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n" for site in ["site-pkgs", "site-pkgs2"]: pkg1_init = open(os.path.join(self._tmpdir, site, "pkg1", "__init__.py"), "w") pkg1_init.write(ns_str) pkg1_init.close() pkg2_init = open(os.path.join(self._tmpdir, site, "pkg1", "pkg2", "__init__.py"), "w") pkg2_init.write(ns_str) pkg2_init.close() import pkg1 assert "pkg1" in pkg_resources._namespace_packages # attempt to import pkg2 from site-pkgs2 import pkg1.pkg2 # check the _namespace_packages dict assert "pkg1.pkg2" in pkg_resources._namespace_packages assert pkg_resources._namespace_packages["pkg1"] == ["pkg1.pkg2"] # check the __path__ attribute contains both paths expected = [ os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"), os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"), ] assert pkg1.pkg2.__path__ == expected
mit
Hodorable/0602
openstack_dashboard/dashboards/project/loadbalancers/tests.py
23
36421
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mox import IsA # noqa from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django import http from horizon.workflows import views from openstack_dashboard import api from openstack_dashboard.test import helpers as test from openstack_dashboard.dashboards.project.loadbalancers import workflows class LoadBalancerTests(test.TestCase): class AttributeDict(dict): def __getattr__(self, attr): return self[attr] def __setattr__(self, attr, value): self[attr] = value DASHBOARD = 'project' INDEX_URL = reverse_lazy('horizon:%s:loadbalancers:index' % DASHBOARD) ADDPOOL_PATH = 'horizon:%s:loadbalancers:addpool' % DASHBOARD ADDVIP_PATH = 'horizon:%s:loadbalancers:addvip' % DASHBOARD ADDMEMBER_PATH = 'horizon:%s:loadbalancers:addmember' % DASHBOARD ADDMONITOR_PATH = 'horizon:%s:loadbalancers:addmonitor' % DASHBOARD POOL_DETAIL_PATH = 'horizon:%s:loadbalancers:pooldetails' % DASHBOARD VIP_DETAIL_PATH = 'horizon:%s:loadbalancers:vipdetails' % DASHBOARD MEMBER_DETAIL_PATH = 'horizon:%s:loadbalancers:memberdetails' % DASHBOARD MONITOR_DETAIL_PATH = 'horizon:%s:loadbalancers:monitordetails' % DASHBOARD UPDATEPOOL_PATH = 'horizon:%s:loadbalancers:updatepool' % DASHBOARD UPDATEVIP_PATH = 'horizon:%s:loadbalancers:updatevip' % DASHBOARD UPDATEMEMBER_PATH = 'horizon:%s:loadbalancers:updatemember' % DASHBOARD UPDATEMONITOR_PATH = 'horizon:%s:loadbalancers:updatemonitor' % DASHBOARD ADDASSOC_PATH = 
'horizon:%s:loadbalancers:addassociation' % DASHBOARD DELETEASSOC_PATH = 'horizon:%s:loadbalancers:deleteassociation' % DASHBOARD def set_up_expect(self): # retrieve pools api.lbaas.pool_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) # retrieves members api.lbaas.member_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.members.list()) # retrieves monitors api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest), tenant_id=self.tenant.id).MultipleTimes() \ .AndReturn(self.monitors.list()) def set_up_expect_with_exception(self): api.lbaas.pool_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndRaise(self.exceptions.neutron) api.lbaas.member_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndRaise(self.exceptions.neutron) api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndRaise(self.exceptions.neutron) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_pools(self): self.set_up_expect() self.mox.ReplayAll() res = self.client.get(self.INDEX_URL) self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['table'].data), len(self.pools.list())) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_members(self): self.set_up_expect() self.mox.ReplayAll() res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members') self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['memberstable_table'].data), len(self.members.list())) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_monitors(self): self.set_up_expect() self.mox.ReplayAll() res = 
self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors') self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['monitorstable_table'].data), len(self.monitors.list())) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_exception_pools(self): self.set_up_expect_with_exception() self.mox.ReplayAll() res = self.client.get(self.INDEX_URL) self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['table'].data), 0) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_exception_members(self): self.set_up_expect_with_exception() self.mox.ReplayAll() res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members') self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['memberstable_table'].data), 0) @test.create_stubs({api.lbaas: ('pool_list', 'member_list', 'pool_health_monitor_list')}) def test_index_exception_monitors(self): self.set_up_expect_with_exception() self.mox.ReplayAll() res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors') self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html' % self.DASHBOARD) self.assertTemplateUsed(res, 'horizon/common/_detail_table.html') self.assertEqual(len(res.context['monitorstable_table'].data), 0) @test.create_stubs({api.neutron: ('network_list_for_tenant', 'provider_list', 'is_extension_supported'), api.lbaas: ('pool_create', )}) def test_add_pool_post(self): pool = self.pools.first() subnet = self.subnets.first() networks = [{'subnets': [subnet, ]}, ] api.neutron.is_extension_supported( IsA(http.HttpRequest), 
'service-type').AndReturn(True) api.neutron.network_list_for_tenant( IsA(http.HttpRequest), self.tenant.id).AndReturn(networks) api.neutron.provider_list(IsA(http.HttpRequest)) \ .AndReturn(self.providers.list()) form_data = {'name': pool.name, 'description': pool.description, 'subnet_id': pool.subnet_id, 'protocol': pool.protocol, 'lb_method': pool.lb_method, 'admin_state_up': pool.admin_state_up} api.lbaas.pool_create( IsA(http.HttpRequest), **form_data).AndReturn(pool) self.mox.ReplayAll() res = self.client.post(reverse(self.ADDPOOL_PATH), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.neutron: ('network_list_for_tenant', 'provider_list', 'is_extension_supported')}) def test_add_pool_get(self): self._test_add_pool_get() @test.create_stubs({api.neutron: ('network_list_for_tenant', 'provider_list', 'is_extension_supported')}) def test_add_pool_get_provider_list_exception(self): self._test_add_pool_get(with_provider_exception=True) @test.create_stubs({api.neutron: ('network_list_for_tenant', 'is_extension_supported')}) def test_add_pool_get_without_service_type_support(self): self._test_add_pool_get(with_service_type=False) def _test_add_pool_get(self, with_service_type=True, with_provider_exception=False): subnet = self.subnets.first() default_provider = self.providers.first()['name'] networks = [{'subnets': [subnet, ]}, ] api.neutron.is_extension_supported( IsA(http.HttpRequest), 'service-type').AndReturn(with_service_type) api.neutron.network_list_for_tenant( IsA(http.HttpRequest), self.tenant.id).AndReturn(networks) if with_service_type: prov_list = api.neutron.provider_list(IsA(http.HttpRequest)) if with_provider_exception: prov_list.AndRaise(self.exceptions.neutron) else: prov_list.AndReturn(self.providers.list()) self.mox.ReplayAll() res = self.client.get(reverse(self.ADDPOOL_PATH)) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) 
self.assertEqual(workflow.name, workflows.AddPool.name) expected_objs = ['<AddPoolStep: addpoolaction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) if not with_service_type: self.assertNotContains(res, default_provider) self.assertContains(res, ('Provider for Load Balancer ' 'is not supported')) elif with_provider_exception: self.assertNotContains(res, default_provider) self.assertContains(res, 'No provider is available') else: self.assertContains(res, default_provider) def test_add_vip_post(self): self._test_add_vip_post() def test_add_vip_post_no_connection_limit(self): self._test_add_vip_post(with_conn_limit=False) def test_add_vip_post_with_diff_subnet(self): self._test_add_vip_post(with_diff_subnet=True) @test.create_stubs({api.lbaas: ('pool_get', 'vip_create'), api.neutron: ( 'network_list_for_tenant', 'subnet_get', )}) def _test_add_vip_post(self, with_diff_subnet=False, with_conn_limit=True): vip = self.vips.first() subnet = self.subnets.first() pool = self.pools.first() networks = [{'subnets': [subnet, ]}, ] api.lbaas.pool_get( IsA(http.HttpRequest), pool.id).MultipleTimes().AndReturn(pool) api.neutron.subnet_get( IsA(http.HttpRequest), subnet.id).AndReturn(subnet) api.neutron.network_list_for_tenant( IsA(http.HttpRequest), self.tenant.id).AndReturn(networks) params = {'name': vip.name, 'description': vip.description, 'pool_id': vip.pool_id, 'address': vip.address, 'subnet_id': pool.subnet_id, 'protocol_port': vip.protocol_port, 'protocol': vip.protocol, 'session_persistence': vip.session_persistence['type'], 'cookie_name': vip.session_persistence['cookie_name'], 'admin_state_up': vip.admin_state_up, } if with_conn_limit: params['connection_limit'] = vip.connection_limit if with_diff_subnet: params['subnet_id'] = vip.subnet_id api.lbaas.vip_create( IsA(http.HttpRequest), **params).AndReturn(vip) self.mox.ReplayAll() form_data = { 'name': vip.name, 'description': vip.description, 'pool_id': vip.pool_id, 'address': vip.address, 'subnet_id': 
pool.subnet_id, 'protocol_port': vip.protocol_port, 'protocol': vip.protocol, 'session_persistence': vip.session_persistence['type'].lower(), 'cookie_name': vip.session_persistence['cookie_name'], 'admin_state_up': vip.admin_state_up} if with_conn_limit: form_data['connection_limit'] = vip.connection_limit if with_diff_subnet: params['subnet_id'] = vip.subnet_id res = self.client.post( reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_get', ), api.neutron: ( 'network_list_for_tenant', 'subnet_get', )}) def test_add_vip_post_with_error(self): vip = self.vips.first() subnet = self.subnets.first() pool = self.pools.first() networks = [{'subnets': [subnet, ]}, ] api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.neutron.subnet_get( IsA(http.HttpRequest), subnet.id).AndReturn(subnet) api.neutron.network_list_for_tenant( IsA(http.HttpRequest), self.tenant.id).AndReturn(networks) self.mox.ReplayAll() form_data = { 'name': vip.name, 'description': vip.description, 'pool_id': vip.pool_id, 'address': vip.address, 'subnet_id': pool.subnet_id, 'protocol_port': 65536, 'protocol': vip.protocol, 'session_persistence': vip.session_persistence['type'].lower(), 'cookie_name': vip.session_persistence['cookie_name'], 'connection_limit': -2, 'admin_state_up': vip.admin_state_up} res = self.client.post( reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data) self.assertFormErrors(res, 2) def test_add_vip_get(self): self._test_add_vip_get() def test_add_vip_get_with_diff_subnet(self): self._test_add_vip_get(with_diff_subnet=True) @test.create_stubs({api.lbaas: ('pool_get', ), api.neutron: ( 'network_list_for_tenant', 'subnet_get', )}) def _test_add_vip_get(self, with_diff_subnet=False): subnet = self.subnets.first() pool = self.pools.first() networks = [{'subnets': [subnet, ]}, ] api.lbaas.pool_get(IsA(http.HttpRequest), 
pool.id).AndReturn(pool) api.neutron.subnet_get( IsA(http.HttpRequest), subnet.id).AndReturn(subnet) api.neutron.network_list_for_tenant( IsA(http.HttpRequest), self.tenant.id).AndReturn(networks) self.mox.ReplayAll() res = self.client.get(reverse(self.ADDVIP_PATH, args=(pool.id,))) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) self.assertEqual(workflow.name, workflows.AddVip.name) expected_objs = ['<AddVipStep: addvipaction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) if with_diff_subnet: self.assertNotEqual(networks[0], pool.subnet_id) @test.create_stubs({api.lbaas: ('pool_health_monitor_create', )}) def test_add_monitor_post(self): monitor = self.monitors.first() form_data = {'type': monitor.type, 'delay': monitor.delay, 'timeout': monitor.timeout, 'max_retries': monitor.max_retries, 'http_method': monitor.http_method, 'url_path': monitor.url_path, 'expected_codes': monitor.expected_codes, 'admin_state_up': monitor.admin_state_up} api.lbaas.pool_health_monitor_create( IsA(http.HttpRequest), **form_data).AndReturn(monitor) self.mox.ReplayAll() res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) def test_add_monitor_post_with_error(self): monitor = self.monitors.first() form_data = {'type': monitor.type, 'delay': 0, 'timeout': 0, 'max_retries': 11, 'http_method': monitor.http_method, 'url_path': monitor.url_path, 'expected_codes': monitor.expected_codes, 'admin_state_up': monitor.admin_state_up} res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data) self.assertFormErrors(res, 3) def test_add_monitor_post_with_httpmethod_error(self): monitor = self.monitors.first() form_data = {'type': 'http', 'delay': monitor.delay, 'timeout': monitor.timeout, 'max_retries': monitor.max_retries, 'http_method': '', 'url_path': '', 'expected_codes': '', 'admin_state_up': monitor.admin_state_up} res = 
self.client.post(reverse(self.ADDMONITOR_PATH), form_data) self.assertFormErrors(res, 3) def test_add_monitor_get(self): res = self.client.get(reverse(self.ADDMONITOR_PATH)) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) self.assertEqual(workflow.name, workflows.AddMonitor.name) expected_objs = ['<AddMonitorStep: addmonitoraction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) def test_add_member_post(self): self._test_add_member_post() def test_add_member_post_without_weight(self): self._test_add_member_post(with_weight=False) def test_add_member_post_without_server_list(self): self._test_add_member_post(with_server_list=False) def test_add_member_post_multiple_ports(self): self._test_add_member_post(mult_ports=True) @test.create_stubs({api.lbaas: ('pool_list', 'pool_get', 'member_create'), api.neutron: ('port_list',), api.nova: ('server_list',)}) def _test_add_member_post(self, with_weight=True, with_server_list=True, mult_ports=False): member = self.members.first() server1 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338042e', 'name': 'vm1'}) server2 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338043e', 'name': 'vm2'}) api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.nova.server_list(IsA(http.HttpRequest)).AndReturn( [[server1, server2], False]) if with_server_list: pool = self.pools.list()[1] port1 = self.AttributeDict( {'fixed_ips': [{'ip_address': member.address, 'subnet_id': 'e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'}], 'network_id': '82288d84-e0a5-42ac-95be-e6af08727e42'}) api.lbaas.pool_get( IsA(http.HttpRequest), pool.id).AndReturn(pool) if mult_ports: port2 = self.AttributeDict( {'fixed_ips': [{'ip_address': '172.16.88.12', 'subnet_id': '3f7c5d79-ee55-47b0-9213-8e669fb03009'}], 'network_id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2'}) api.neutron.port_list( IsA(http.HttpRequest), 
device_id=server1.id).AndReturn([port1, port2]) else: api.neutron.port_list( IsA(http.HttpRequest), device_id=server1.id).AndReturn([port1, ]) form_data = {'pool_id': member.pool_id, 'protocol_port': member.protocol_port, 'members': [server1.id], 'admin_state_up': member.admin_state_up} if with_weight: form_data['weight'] = member.weight if with_server_list: form_data['member_type'] = 'server_list' else: form_data['member_type'] = 'member_address' form_data['address'] = member.address api.lbaas.member_create(IsA(http.HttpRequest), **form_data).AndReturn(member) self.mox.ReplayAll() res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_list',), api.nova: ('server_list',)}) def test_add_member_post_with_error(self): member = self.members.first() server1 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338042e', 'name': 'vm1'}) server2 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338043e', 'name': 'vm2'}) api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[server1, server2], False]) self.mox.ReplayAll() # try to create member with invalid protocol port and weight form_data = {'pool_id': member.pool_id, 'address': member.address, 'protocol_port': 65536, 'weight': -1, 'members': [server1.id], 'admin_state_up': member.admin_state_up} res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data) self.assertFormErrors(res, 2) @test.create_stubs({api.lbaas: ('pool_list',), api.nova: ('server_list',)}) def test_add_member_get(self): server1 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338042e', 'name': 'vm1'}) server2 = self.AttributeDict({'id': '12381d38-c3eb-4fee-9763-12de3338043e', 'name': 'vm2'}) api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) 
api.nova.server_list( IsA(http.HttpRequest)).AndReturn([[server1, server2], False]) self.mox.ReplayAll() res = self.client.get(reverse(self.ADDMEMBER_PATH)) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) self.assertEqual(workflow.name, workflows.AddMember.name) expected_objs = ['<AddMemberStep: addmemberaction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) @test.create_stubs({api.lbaas: ('pool_get', 'pool_update')}) def test_update_pool_post(self): pool = self.pools.first() api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) data = {'name': pool.name, 'description': pool.description, 'lb_method': pool.lb_method, 'admin_state_up': pool.admin_state_up} api.lbaas.pool_update(IsA(http.HttpRequest), pool.id, pool=data)\ .AndReturn(pool) self.mox.ReplayAll() form_data = data.copy() form_data['pool_id'] = pool.id res = self.client.post( reverse(self.UPDATEPOOL_PATH, args=(pool.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_get',)}) def test_update_pool_get(self): pool = self.pools.first() api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) self.mox.ReplayAll() res = self.client.get(reverse(self.UPDATEPOOL_PATH, args=(pool.id,))) self.assertTemplateUsed(res, 'project/loadbalancers/updatepool.html') @test.create_stubs({api.lbaas: ('pool_list', 'vip_get', 'vip_update')}) def test_update_vip_post(self): vip = self.vips.first() api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip) data = {'name': vip.name, 'description': vip.description, 'pool_id': vip.pool_id, 'session_persistence': {}, 'connection_limit': vip.connection_limit, 'admin_state_up': vip.admin_state_up} api.lbaas.vip_update(IsA(http.HttpRequest), vip.id, vip=data)\ .AndReturn(vip) self.mox.ReplayAll() form_data = 
data.copy() form_data['vip_id'] = vip.id res = self.client.post( reverse(self.UPDATEVIP_PATH, args=(vip.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('vip_get', 'pool_list')}) def test_update_vip_get(self): vip = self.vips.first() api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip) self.mox.ReplayAll() res = self.client.get(reverse(self.UPDATEVIP_PATH, args=(vip.id,))) self.assertTemplateUsed(res, 'project/loadbalancers/updatevip.html') @test.create_stubs({api.lbaas: ('pool_list', 'member_get', 'member_update')}) def test_update_member_post(self): member = self.members.first() api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.member_get(IsA(http.HttpRequest), member.id)\ .AndReturn(member) data = {'pool_id': member.pool_id, 'weight': member.weight, 'admin_state_up': member.admin_state_up} api.lbaas.member_update(IsA(http.HttpRequest), member.id, member=data)\ .AndReturn(member) self.mox.ReplayAll() form_data = data.copy() form_data['member_id'] = member.id res = self.client.post( reverse(self.UPDATEMEMBER_PATH, args=(member.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('member_get', 'pool_list')}) def test_update_member_get(self): member = self.members.first() api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.member_get(IsA(http.HttpRequest), member.id)\ .AndReturn(member) self.mox.ReplayAll() res = self.client.get( reverse(self.UPDATEMEMBER_PATH, args=(member.id,))) self.assertTemplateUsed(res, 'project/loadbalancers/updatemember.html') @test.create_stubs({api.lbaas: ('pool_health_monitor_get', 'pool_health_monitor_update')}) def test_update_monitor_post(self): 
monitor = self.monitors.first() api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\ .AndReturn(monitor) data = {'delay': monitor.delay, 'timeout': monitor.timeout, 'max_retries': monitor.max_retries, 'admin_state_up': monitor.admin_state_up} api.lbaas.pool_health_monitor_update( IsA(http.HttpRequest), monitor.id, health_monitor=data).AndReturn(monitor) self.mox.ReplayAll() form_data = data.copy() form_data['monitor_id'] = monitor.id res = self.client.post( reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_health_monitor_get',)}) def test_update_monitor_get(self): monitor = self.monitors.first() api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\ .AndReturn(monitor) self.mox.ReplayAll() res = self.client.get( reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,))) self.assertTemplateUsed( res, 'project/loadbalancers/updatemonitor.html') @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list', 'pool_monitor_association_create')}) def test_add_pool_monitor_association_post(self): pool = self.pools.list()[1] monitors = self.monitors.list() monitor = self.monitors.list()[1] api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest), tenant_id=self.tenant.id).AndReturn(monitors) form_data = {'monitor_id': monitor.id, 'pool_id': pool.id, 'pool_monitors': pool.health_monitors, 'pool_name': pool.name} api.lbaas.pool_monitor_association_create( IsA(http.HttpRequest), **form_data).AndReturn(None) self.mox.ReplayAll() res = self.client.post( reverse(self.ADDASSOC_PATH, args=(pool.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list')}) def test_add_pool_monitor_association_get(self): pool = self.pools.first() 
monitors = self.monitors.list() api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest), tenant_id=self.tenant.id).AndReturn(monitors) self.mox.ReplayAll() res = self.client.get(reverse(self.ADDASSOC_PATH, args=(pool.id,))) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) self.assertEqual(workflow.name, workflows.AddPMAssociation.name) expected_objs = ['<AddPMAssociationStep: addpmassociationaction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list', 'pool_monitor_association_delete')}) def test_delete_pool_monitor_association_post(self): pool = self.pools.first() monitors = self.monitors.list() monitor = monitors[0] api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest)).AndReturn(monitors) form_data = {'monitor_id': monitor.id, 'pool_id': pool.id, 'pool_monitors': pool.health_monitors, 'pool_name': pool.name} api.lbaas.pool_monitor_association_delete( IsA(http.HttpRequest), **form_data).AndReturn(None) self.mox.ReplayAll() res = self.client.post( reverse(self.DELETEASSOC_PATH, args=(pool.id,)), form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, str(self.INDEX_URL)) @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list')}) def test_delete_pool_monitor_association_get(self): pool = self.pools.first() monitors = self.monitors.list() api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest)).AndReturn(monitors) self.mox.ReplayAll() res = self.client.get( reverse(self.DELETEASSOC_PATH, args=(pool.id,))) workflow = res.context['workflow'] self.assertTemplateUsed(res, views.WorkflowView.template_name) self.assertEqual(workflow.name, workflows.DeletePMAssociation.name) expected_objs = [ 
'<DeletePMAssociationStep: deletepmassociationaction>', ] self.assertQuerysetEqual(workflow.steps, expected_objs) @test.create_stubs({api.lbaas: ('pool_list', 'pool_delete')}) def test_delete_pool(self): pool = self.pools.first() api.lbaas.pool_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.pool_delete(IsA(http.HttpRequest), pool.id) self.mox.ReplayAll() form_data = {"action": "poolstable__deletepool__%s" % pool.id} res = self.client.post(self.INDEX_URL, form_data) self.assertNoFormErrors(res) @test.create_stubs({api.lbaas: ('pool_list', 'pool_get', 'vip_delete')}) def test_delete_vip(self): pool = self.pools.first() vip = self.vips.first() api.lbaas.pool_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.pools.list()) api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool) api.lbaas.vip_delete(IsA(http.HttpRequest), vip.id) self.mox.ReplayAll() form_data = {"action": "poolstable__deletevip__%s" % pool.id} res = self.client.post(self.INDEX_URL, form_data) self.assertNoFormErrors(res) @test.create_stubs({api.lbaas: ('member_list', 'member_delete')}) def test_delete_member(self): member = self.members.first() api.lbaas.member_list( IsA(http.HttpRequest), tenant_id=self.tenant.id) \ .AndReturn(self.members.list()) api.lbaas.member_delete(IsA(http.HttpRequest), member.id) self.mox.ReplayAll() form_data = {"action": "memberstable__deletemember__%s" % member.id} res = self.client.post(self.INDEX_URL, form_data) self.assertNoFormErrors(res) @test.create_stubs({api.lbaas: ('pool_health_monitor_list', 'pool_health_monitor_delete')}) def test_delete_monitor(self): monitor = self.monitors.first() api.lbaas.pool_health_monitor_list( IsA(http.HttpRequest), tenant_id=self.tenant.id).MultipleTimes() \ .AndReturn(self.monitors.list()) api.lbaas.pool_health_monitor_delete(IsA(http.HttpRequest), monitor.id) self.mox.ReplayAll() form_data = {"action": "monitorstable__deletemonitor__%s" % monitor.id} 
res = self.client.post(self.INDEX_URL, form_data) self.assertNoFormErrors(res)
apache-2.0
alqfahad/odoo
addons/purchase/wizard/purchase_order_group.py
376
3379
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time

from openerp.osv import fields, osv
from openerp.tools.translate import _


class purchase_order_group(osv.osv_memory):
    """Wizard that merges the purchase orders selected in the list view.

    The actual merge logic lives in purchase.order.do_merge(); this wizard
    only validates the selection and opens the resulting order(s).
    """
    _name = "purchase.order.group"
    _description = "Purchase Order Merge"

    def fields_view_get(self, cr, uid, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """Validate the selection before rendering the wizard view.

        :param cr: database cursor
        :param uid: id of the current user
        :param context: standard context; read for 'active_model'/'active_ids'
        :return: the view description from the parent implementation
        :raises osv.except_osv: when fewer than two purchase orders are
            selected, since a merge needs at least two source documents
        """
        if context is None:
            context = {}
        res = super(purchase_order_group, self).fields_view_get(
            cr, uid, view_id=view_id, view_type=view_type, context=context,
            toolbar=toolbar, submenu=False)
        # Use .get() so a missing 'active_ids' key counts as "nothing
        # selected" and produces the user-facing warning instead of a
        # KeyError (bug fix: the original indexed context['active_ids']).
        if context.get('active_model', '') == 'purchase.order' and \
                len(context.get('active_ids', [])) < 2:
            raise osv.except_osv(_('Warning!'),
                _('Please select multiple order to merge in the list view.'))
        return res

    def merge_orders(self, cr, uid, ids, context=None):
        """Merge the selected purchase orders and return an action to show them.

        :param cr: database cursor
        :param uid: id of the current user
        :param ids: ids of this wizard record (unused beyond the API contract)
        :param context: standard context; 'active_ids' holds the orders to merge
        :return: an ir.actions.act_window dict restricted to the merged orders
        """
        if context is None:
            context = {}
        order_obj = self.pool.get('purchase.order')
        mod_obj = self.pool.get('ir.model.data')
        # Resolve the standard purchase search view so the resulting list
        # keeps the usual filters.
        result = mod_obj._get_id(cr, uid, 'purchase', 'view_purchase_order_filter')
        search_view = mod_obj.read(cr, uid, result, ['res_id'])
        # do_merge() returns a dict keyed by the ids of the merged orders.
        allorders = order_obj.do_merge(cr, uid, context.get('active_ids', []), context)
        return {
            'domain': "[('id','in', [" + ','.join(map(str, allorders.keys())) + "])]",
            'name': _('Purchase Orders'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'purchase.order',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'search_view_id': search_view['res_id'],
        }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
xingyepei/edx-platform
common/djangoapps/status/tests.py
115
2136
# -*- coding: utf-8 -*- """ Tests for setting and displaying the site status message. """ import ddt import unittest from django.test import TestCase from django.core.cache import cache from django.conf import settings from opaque_keys.edx.locations import CourseLocator from .status import get_site_status_msg from .models import GlobalStatusMessage, CourseMessage @ddt.ddt class TestStatus(TestCase): """Test that the get_site_status_msg function does the right thing""" def setUp(self): super(TestStatus, self).setUp() # Clear the cache between test runs. cache.clear() self.course_key = CourseLocator(org='TestOrg', course='TestCourse', run='TestRun') @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') @ddt.data( ("Test global message", "Test course message"), (u" Ŧɇsŧ sŧȺŧᵾs", u"Ṫëṡẗ ċöüṛṡë ṡẗäẗüṡ "), (u"", u"Ṫëṡẗ ċöüṛṡë ṡẗäẗüṡ "), (u" Ŧɇsŧ sŧȺŧᵾs", u""), ) @ddt.unpack def test_get_site_status_msg(self, test_global_message, test_course_message): """Test status messages in a variety of situations.""" # When we don't have any data set. self.assertEqual(get_site_status_msg(None), None) self.assertEqual(get_site_status_msg(self.course_key), None) msg = GlobalStatusMessage.objects.create(message=test_global_message, enabled=True) msg.save() self.assertEqual(get_site_status_msg(None), test_global_message) course_msg = CourseMessage.objects.create( global_message=msg, message=test_course_message, course_key=self.course_key ) course_msg.save() self.assertEqual( get_site_status_msg(self.course_key), u"{} <br /> {}".format(test_global_message, test_course_message) ) msg = GlobalStatusMessage.objects.create(message="", enabled=False) msg.save() self.assertEqual(get_site_status_msg(None), None) self.assertEqual(get_site_status_msg(self.course_key), None)
agpl-3.0
octopus-platform/octopus
python/octopus-mlutils/octopus/shell/completer/octopus_rlcompleter.py
5
2125
import readline


class OctopusShellCompleter(object):
    """readline completer for the Octopus gremlin shell.

    Completion candidates are fetched by evaluating small Groovy snippets
    in the attached shell, so suggestions always reflect the live session.
    """

    def __init__(self, shell):
        self.shell = shell
        self.context = None
        self.matches = None
        # Break completion words on '.' so each segment of a Gremlin step
        # chain is completed independently.
        readline.set_completer_delims(".")

    def complete(self, text, state):
        """readline entry point: return the state-th candidate for text.

        Candidates are computed once per completion cycle (state == 0) and
        replayed from self.matches for subsequent states; None ends the cycle.
        """
        if state == 0:
            self.set_context()
            if self.context == 'traversal':
                candidates = self._get_step_matches(text)
            elif self.context == 'start':
                candidates = self.get_binding_matches(text)
            else:
                candidates = []
            self.matches = candidates
        try:
            return self.matches[state]
        except IndexError:
            return None

    def get_binding_matches(self, text):
        """Return shell binding names that start with text."""
        names = self.shell.run_command("getBinding().getVariables().keySet()")
        return [name for name in names if name.startswith(text)]

    def _get_step_matches(self, text):
        """Return traversal-step / method names valid after the last '.'."""
        line_so_far = readline.get_line_buffer()
        # Everything before the final '.' is the expression being completed on.
        expression = line_so_far.rsplit(".", 1)[0]
        candidates = []
        if expression:
            # Ask the shell for the runtime class of the expression; the class
            # name is the last whitespace-separated token of the reply.
            reply = self.shell.run_command("{}.getClass()".format(expression))
            full_class = reply[0].split(" ")[-1]
            simple_class = full_class.split(".")[-1]
            if simple_class == "DefaultGraphTraversal":
                # Traversals get the full step vocabulary plus session steps.
                for query in ("GraphTraversal.class.methods.name.unique()",
                              "GraphTraversal.metaClass.methods.name.unique()",
                              "getBinding().getVariable(\"sessionSteps\").keySet()"):
                    candidates += self.shell.run_command(query)
            else:
                candidates += self.shell.run_command(
                    "{}.getMethods().name.unique()".format(full_class))
        return [name for name in candidates if name.startswith(text)]

    def set_context(self):
        """Classify the current line by scanning backwards from the cursor."""
        line = readline.get_line_buffer()
        self.context = "start"
        for ch in reversed(line):
            if ch == '.':
                self.context = "traversal"
                return
            if ch in ')}':
                self.context = "complete"
                return
            if ch in '({':
                self.context = "groovy"
                return
lgpl-3.0
omprakasha/odoo
addons/l10n_in_hr_payroll/report/payslip_report.py
340
3978
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields, osv


class payslip_report(osv.osv):
    """Read-only payslip analysis model backed by a SQL view.

    ``_auto = False`` prevents the ORM from creating a table; ``init()``
    (re)creates the ``payslip_report`` view instead.  One record per
    (payslip, payslip line category, employee) aggregate.
    """
    _name = "payslip.report"
    _description = "Payslip Analysis"
    _auto = False  # backed by the SQL view below, not an ORM-managed table
    _columns = {
        # All columns are readonly: the view is aggregate output only.
        'name': fields.char('Name', readonly=True),
        'date_from': fields.date('Date From', readonly=True,),
        'date_to': fields.date('Date To', readonly=True,),
        # year/month/day are derived in SQL from date_from via to_char().
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'),
            ('04', 'April'), ('05', 'May'), ('06', 'June'), ('07', 'July'),
            ('08', 'August'), ('09', 'September'), ('10', 'October'),
            ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', readonly=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
        # Constant 1 per row in the view; summing it counts payslip lines.
        'nbr': fields.integer('# Payslip lines', readonly=True),
        'number': fields.char('Number', readonly=True),
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'paid': fields.boolean('Made Payment Order ? ', readonly=True),
        'total': fields.float('Total', readonly=True),
        'category_id': fields.many2one('hr.salary.rule.category', 'Category', readonly=True),
    }

    def init(self, cr):
        # Rebuild the backing view on module install/upgrade; dropping it
        # first avoids "cannot change view" errors when columns change.
        tools.drop_view_if_exists(cr, 'payslip_report')
        cr.execute("""
            create or replace view payslip_report as (
                select
                    min(l.id) as id,
                    l.name,
                    p.struct_id,
                    p.state,
                    p.date_from,
                    p.date_to,
                    p.number,
                    p.company_id,
                    p.paid,
                    l.category_id,
                    l.employee_id,
                    sum(l.total) as total,
                    to_char(p.date_from, 'YYYY') as year,
                    to_char(p.date_from, 'MM') as month,
                    to_char(p.date_from, 'YYYY-MM-DD') as day,
                    to_char(p.date_to, 'YYYY') as to_year,
                    to_char(p.date_to, 'MM') as to_month,
                    to_char(p.date_to, 'YYYY-MM-DD') as to_day,
                    1 AS nbr
                from hr_payslip as p
                    left join hr_payslip_line as l on (p.id=l.slip_id)
                where l.employee_id IS NOT NULL
                group by p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
                    l.employee_id,p.struct_id,l.category_id
            )
        """)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
anryko/ansible
lib/ansible/module_utils/network/eos/facts/lldp_global/lldp_global.py
21
2946
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos lldp_global fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re
from copy import deepcopy

from ansible.module_utils.network.common import utils
from ansible.module_utils.network.eos.argspec.lldp_global.lldp_global import Lldp_globalArgs


class Lldp_globalFacts(object):
    """ The eos lldp_global fact class """

    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = Lldp_globalArgs.argument_spec
        # Narrow the full argspec down to the sub-tree that describes the
        # facts payload (argument_spec['config']['options'] by default).
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec
        # Skeleton dict with every spec key present, to be filled by
        # render_config().
        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lldp_global

        Fetches ``show running-config | section lldp`` from the device when
        no previously collected config is supplied, parses it, and writes
        the result under ``ansible_network_resources['lldp_global']``.

        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            data = connection.get('show running-config | section lldp')

        obj = {}
        if data:
            obj.update(self.render_config(self.generated_spec, data))
        # Drop any stale entry before inserting the freshly parsed facts.
        ansible_facts['ansible_network_resources'].pop('lldp_global', None)
        facts = {}
        if obj:
            # Validate against the argspec, then strip keys with empty values.
            params = utils.validate_config(self.argument_spec, {'config': obj})
            facts['lldp_global'] = utils.remove_empties(params['config'])
        else:
            facts['lldp_global'] = {}
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """ Render config as dictionary structure and delete keys
          from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)

        config['holdtime'] = utils.parse_conf_arg(conf, 'holdtime')
        config['reinit'] = utils.parse_conf_arg(conf, 'reinit')
        config['timer'] = utils.parse_conf_arg(conf, 'timer')

        # Lines look like "[no ]lldp tlv-select <option>"; group 0 captures
        # the optional "no" prefix, group 1 the option name.
        for match in re.findall(r'^(no)? lldp tlv-select (\S+)', conf, re.MULTILINE):
            # Dashes become underscores so the key matches the argspec name.
            tlv_option = match[1].replace("-", "_")
            # Enabled unless the line is prefixed with "no".
            config['tlv_select'][tlv_option] = bool(match[0] != "no")
        return utils.remove_empties(config)
gpl-3.0
ternus/war-reporter
panoptic/modules/iostat.py
1
1795
from base import PanopticStatPlugin
import subprocess


class PanopticIOStat(PanopticStatPlugin):
    """Stat plugin that samples host identity and avg-cpu figures from iostat(1).

    Only the banner line and the avg-cpu block are parsed; the per-device
    rows in the iostat output are ignored.
    """
    # NOTE(review): diffable's exact meaning is defined by
    # PanopticStatPlugin (not visible here) — presumably False means these
    # values are reported as-is rather than as deltas between samples.
    diffable = False
    """ Example output of iostat command:

    Linux 3.2.44-3.2.1.3-amd64-10875514 (foo.bar.com)   10/10/13

    avg-cpu:  %user   %nice %system %iowait  %steal   %idle
               0.29    0.00    0.91    0.00    0.00   98.79

    Device:            tps   Blk_read/s   Blk_wrtn/s   Blk_read   Blk_wrtn
    sdb               1.97         0.09        43.69      61998   31801992
    sdb1              1.79         0.08        41.81      56712   30438680
    sdb2              0.19         0.00         1.87       1586    1363304
    sdb3              0.00         0.00         0.00       1176          0
    sdb4              0.00         0.00         0.00       2140          8
    sda               2.02         0.85        22.57     618914   16430648
    sda1              0.42         0.14         3.60     104520    2621888
    sda2              0.11         0.14         1.43     101050    1043720
    sda3              1.11         0.52        11.38     377394    8283576
    sda4              0.38         0.05         6.16      35558    4481464
    """
    def sample(self):
        """Run iostat once and store parsed values in self.stats.

        Parsing is positional against the example output above: line 0 is
        the banner, line 2 the avg-cpu header, line 3 the avg-cpu values.
        """
        # NOTE(review): on Python 3, check_output() returns bytes, so
        # .split('\n') would raise TypeError — this code assumes Python 2
        # str output; confirm the target runtime.
        raw_stats = [l.split() for l in subprocess.check_output(['iostat']).split('\n')]
        stats = {}
        # Banner line: "<sys_type> <kernel> (<hostname>) <date>".
        host_data = raw_stats[0]
        stats['sys_type'] = host_data[0]
        stats['kernel'] = host_data[1]
        stats['hostname'] = host_data[2][1:-1]  # strip the surrounding parentheses
        # avg-cpu values line (the header with the %-column names is line 2).
        cpu_data = raw_stats[3]
        stats['user_cpu'] = cpu_data[0]
        stats['nice_cpu'] = cpu_data[1]
        stats['system_cpu'] = cpu_data[2]
        stats['iowait_cpu'] = cpu_data[3]
        stats['steal_cpu'] = cpu_data[4]
        stats['idle_cpu'] = cpu_data[5]
        self.stats = stats
mit
boyuegame/kbengine
kbe/src/lib/python/Lib/distutils/command/bdist_msi.py
152
35217
# Copyright (C) 2005, 2006 Martin von Löwis # Licensed to PSF under a Contributor Agreement. # The bdist_wininst command proper # based on bdist_wininst """ Implements the bdist_msi command. """ import sys, os from distutils.core import Command from distutils.dir_util import remove_tree from distutils.sysconfig import get_python_version from distutils.version import StrictVersion from distutils.errors import DistutilsOptionError from distutils.util import get_platform from distutils import log import msilib from msilib import schema, sequence, text from msilib import Directory, Feature, Dialog, add_data class PyDialog(Dialog): """Dialog class with a fixed layout: controls at the top, then a ruler, then a list of buttons: back, next, cancel. Optionally a bitmap at the left.""" def __init__(self, *args, **kw): """Dialog(database, name, x, y, w, h, attributes, title, first, default, cancel, bitmap=true)""" Dialog.__init__(self, *args) ruler = self.h - 36 bmwidth = 152*ruler/328 #if kw.get("bitmap", True): # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin") self.line("BottomLine", 0, ruler, self.w, 0) def title(self, title): "Set the title text of the dialog at the top." # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix, # text, in VerdanaBold10 self.text("Title", 15, 10, 320, 60, 0x30003, r"{\VerdanaBold10}%s" % title) def back(self, title, next, name = "Back", active = 1): """Add a back button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next) def cancel(self, title, next, name = "Cancel", active = 1): """Add a cancel button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. 
Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next) def next(self, title, next, name = "Next", active = 1): """Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next) def xbutton(self, name, title, next, xpos): """Add a button with a given title, the tab-next button, its name in the Control table, giving its x position; the y-position is aligned with the other buttons. Return the button, so that events can be associated""" return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next) class bdist_msi(Command): description = "create a Microsoft Installer (.msi) binary distribution" user_options = [('bdist-dir=', None, "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('target-version=', None, "require a specific python version" + " on the target system"), ('no-target-compile', 'c', "do not compile .py to .pyc on the target system"), ('no-target-optimize', 'o', "do not compile .py to .pyo (optimized)" "on the target system"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('install-script=', None, "basename of installation script to be run after" "installation or before deinstallation"), ('pre-install-script=', None, "Fully qualified filename of a script to be run before " "any files are installed. 
This script need not be in the " "distribution"), ] boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', 'skip-build'] all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '2.8', '2.9', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9'] other_version = 'X' def initialize_options(self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.no_target_compile = 0 self.no_target_optimize = 0 self.target_version = None self.dist_dir = None self.skip_build = None self.install_script = None self.pre_install_script = None self.versions = None def finalize_options(self): self.set_undefined_options('bdist', ('skip_build', 'skip_build')) if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'msi') short_version = get_python_version() if (not self.target_version) and self.distribution.has_ext_modules(): self.target_version = short_version if self.target_version: self.versions = [self.target_version] if not self.skip_build and self.distribution.has_ext_modules()\ and self.target_version != short_version: raise DistutilsOptionError( "target version can only be %s, or the '--skip-build'" " option must be specified" % (short_version,)) else: self.versions = list(self.all_versions) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name'), ) if self.pre_install_script: raise DistutilsOptionError( "the pre-install-script feature is not yet implemented") if self.install_script: for script in self.distribution.scripts: if self.install_script == os.path.basename(script): break else: raise DistutilsOptionError( "install_script '%s' not found in scripts" % self.install_script) self.install_script_key = None def run(self): if not self.skip_build: self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.prefix = self.bdist_dir install.skip_build = self.skip_build 
install.warn_dir = 0 install_lib = self.reinitialize_command('install_lib') # we do not want to include pyc or pyo files install_lib.compile = 0 install_lib.optimize = 0 if self.distribution.has_ext_modules(): # If we are building an installer for a Python version other # than the one we are currently running, then we need to ensure # our build_lib reflects the other Python version rather than ours. # Note that for target_version!=sys.version, we must have skipped the # build step, so there is no issue with enforcing the build of this # version. target_version = self.target_version if not target_version: assert self.skip_build, "Should have already checked this" target_version = sys.version[0:3] plat_specifier = ".%s-%s" % (self.plat_name, target_version) build = self.get_finalized_command('build') build.build_lib = os.path.join(build.build_base, 'lib' + plat_specifier) log.info("installing to %s", self.bdist_dir) install.ensure_finalized() # avoid warning of 'install_lib' about installing # into a directory not in sys.path sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) install.run() del sys.path[0] self.mkpath(self.dist_dir) fullname = self.distribution.get_fullname() installer_name = self.get_installer_filename(fullname) installer_name = os.path.abspath(installer_name) if os.path.exists(installer_name): os.unlink(installer_name) metadata = self.distribution.metadata author = metadata.author if not author: author = metadata.maintainer if not author: author = "UNKNOWN" version = metadata.get_version() # ProductVersion must be strictly numeric # XXX need to deal with prerelease versions sversion = "%d.%d.%d" % StrictVersion(version).version # Prefix ProductName with Python x.y, so that # it sorts together with the other Python packages # in Add-Remove-Programs (APR) fullname = self.distribution.get_fullname() if self.target_version: product_name = "Python %s %s" % (self.target_version, fullname) else: product_name = "Python %s" % (fullname) self.db = 
msilib.init_database(installer_name, schema, product_name, msilib.gen_uuid(), sversion, author) msilib.add_tables(self.db, sequence) props = [('DistVersion', version)] email = metadata.author_email or metadata.maintainer_email if email: props.append(("ARPCONTACT", email)) if metadata.url: props.append(("ARPURLINFOABOUT", metadata.url)) if props: add_data(self.db, 'Property', props) self.add_find_python() self.add_files() self.add_scripts() self.add_ui() self.db.Commit() if hasattr(self.distribution, 'dist_files'): tup = 'bdist_msi', self.target_version or 'any', fullname self.distribution.dist_files.append(tup) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) def add_files(self): db = self.db cab = msilib.CAB("distfiles") rootdir = os.path.abspath(self.bdist_dir) root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir") f = Feature(db, "Python", "Python", "Everything", 0, 1, directory="TARGETDIR") items = [(f, root, '')] for version in self.versions + [self.other_version]: target = "TARGETDIR" + version name = default = "Python" + version desc = "Everything" if version is self.other_version: title = "Python from another location" level = 2 else: title = "Python %s from registry" % version level = 1 f = Feature(db, name, title, desc, 1, level, directory=target) dir = Directory(db, cab, root, rootdir, target, default) items.append((f, dir, version)) db.Commit() seen = {} for feature, dir, version in items: todo = [dir] while todo: dir = todo.pop() for file in os.listdir(dir.absolute): afile = os.path.join(dir.absolute, file) if os.path.isdir(afile): short = "%s|%s" % (dir.make_short(file), file) default = file + version newdir = Directory(db, cab, dir, file, default, short) todo.append(newdir) else: if not dir.component: dir.start_component(dir.logical, feature, 0) if afile not in seen: key = seen[afile] = dir.add_file(file) if file==self.install_script: if self.install_script_key: raise DistutilsOptionError( "Multiple files with name 
%s" % file) self.install_script_key = '[#%s]' % key else: key = seen[afile] add_data(self.db, "DuplicateFile", [(key + version, dir.component, key, None, dir.logical)]) db.Commit() cab.commit(db) def add_find_python(self): """Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe""" start = 402 for ver in self.versions: install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver machine_reg = "python.machine." + ver user_reg = "python.user." + ver machine_prop = "PYTHON.MACHINE." + ver user_prop = "PYTHON.USER." + ver machine_action = "PythonFromMachine" + ver user_action = "PythonFromUser" + ver exe_action = "PythonExe" + ver target_dir_prop = "TARGETDIR" + ver exe_prop = "PYTHON" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, "RegLocator", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, "AppSearch", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, "CustomAction", [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), ]) add_data(self.db, "InstallExecuteSequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "InstallUISequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "Condition", [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) start += 4 assert start < 500 def add_scripts(self): if self.install_script: start = 6800 
for ver in self.versions + [self.other_version]: install_action = "install_script." + ver exe_prop = "PYTHON" + ver add_data(self.db, "CustomAction", [(install_action, 50, exe_prop, self.install_script_key)]) add_data(self.db, "InstallExecuteSequence", [(install_action, "&Python%s=3" % ver, start)]) start += 1 # XXX pre-install scripts are currently refused in finalize_options() # but if this feature is completed, it will also need to add # entries for each version as the above code does if self.pre_install_script: scriptfn = os.path.join(self.bdist_dir, "preinstall.bat") f = open(scriptfn, "w") # The batch file will be executed with [PYTHON], so that %1 # is the path to the Python interpreter; %0 will be the path # of the batch file. # rem =""" # %1 %0 # exit # """ # <actual script> f.write('rem ="""\n%1 %0\nexit\n"""\n') f.write(open(self.pre_install_script).read()) f.close() add_data(self.db, "Binary", [("PreInstall", msilib.Binary(scriptfn)) ]) add_data(self.db, "CustomAction", [("PreInstall", 2, "PreInstall", None) ]) add_data(self.db, "InstallExecuteSequence", [("PreInstall", "NOT Installed", 450)]) def add_ui(self): db = self.db x = y = 50 w = 370 h = 300 title = "[ProductName] Setup" # see "Dialog Style Bits" modal = 3 # visible | modal modeless = 1 # visible track_disk_space = 32 # UI customization properties add_data(db, "Property", # See "DefaultUIFont Property" [("DefaultUIFont", "DlgFont8"), # See "ErrorDialog Style Bit" ("ErrorDialog", "ErrorDlg"), ("Progress1", "Install"), # modified in maintenance type dlg ("Progress2", "installs"), ("MaintenanceForm_Action", "Repair"), # possible values: ALL, JUSTME ("WhichUsers", "ALL") ]) # Fonts, see "TextStyle Table" add_data(db, "TextStyle", [("DlgFont8", "Tahoma", 9, None, 0), ("DlgFontBold8", "Tahoma", 8, None, 1), #bold ("VerdanaBold10", "Verdana", 10, None, 1), ("VerdanaRed9", "Verdana", 9, 255, 0), ]) # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table" # Numbers indicate sequence; see 
sequence.py for how these action integrate add_data(db, "InstallUISequence", [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140), ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141), # In the user interface, assume all-users installation if privileged. ("SelectFeaturesDlg", "Not Installed", 1230), # XXX no support for resume installations yet #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240), ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250), ("ProgressDlg", None, 1280)]) add_data(db, 'ActionText', text.ActionText) add_data(db, 'UIText', text.UIText) ##################################################################### # Standard dialogs: FatalError, UserExit, ExitDialog fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title, "Finish", "Finish", "Finish") fatal.title("[ProductName] Installer ended prematurely") fatal.back("< Back", "Finish", active = 0) fatal.cancel("Cancel", "Back", active = 0) fatal.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.") fatal.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c=fatal.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title, "Finish", "Finish", "Finish") user_exit.title("[ProductName] Installer was interrupted") user_exit.back("< Back", "Finish", active = 0) user_exit.cancel("Cancel", "Back", active = 0) user_exit.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup was interrupted. Your system has not been modified. 
" "To install this program at a later time, please run the installation again.") user_exit.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = user_exit.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, "Finish", "Finish", "Finish") exit_dialog.title("Completing the [ProductName] Installer") exit_dialog.back("< Back", "Finish", active = 0) exit_dialog.cancel("Cancel", "Back", active = 0) exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = exit_dialog.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Return") ##################################################################### # Required dialog: FilesInUse, ErrorDlg inuse = PyDialog(db, "FilesInUse", x, y, w, h, 19, # KeepModeless|Modal|Visible title, "Retry", "Retry", "Retry", bitmap=False) inuse.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Files in Use") inuse.text("Description", 20, 23, 280, 20, 0x30003, "Some files that need to be updated are currently in use.") inuse.text("Text", 20, 55, 330, 50, 3, "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", None, None, None) c=inuse.back("Exit", "Ignore", name="Exit") c.event("EndDialog", "Exit") c=inuse.next("Ignore", "Retry", name="Ignore") c.event("EndDialog", "Ignore") c=inuse.cancel("Retry", "Exit", name="Retry") c.event("EndDialog","Retry") # See "Error Dialog". See "ICE20" for the required names of the controls. 
error = Dialog(db, "ErrorDlg", 50, 10, 330, 101, 65543, # Error|Minimize|Modal|Visible title, "ErrorText", None, None) error.text("ErrorText", 50,9,280,48,3, "") #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") ##################################################################### # Global "Query Cancel" dialog cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, "No", "No", "No") cancel.text("Text", 48, 15, 194, 30, 3, "Are you sure you want to cancel [ProductName] installation?") #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, # "py.ico", None, None) c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") c.event("EndDialog", "Exit") c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") c.event("EndDialog", "Return") ##################################################################### # Global "Wait for costing" dialog costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, "Return", "Return", "Return") costing.text("Text", 48, 15, 194, 30, 3, "Please wait while the installer finishes determining your disk space requirements.") c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) c.event("EndDialog", "Exit") ##################################################################### # Preparation dialog: no user input except cancellation prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", 
"Cancel") prep.text("Description", 15, 70, 320, 40, 0x30003, "Please wait while the Installer prepares to guide you through the installation.") prep.title("Welcome to the [ProductName] Installer") c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") c.mapping("ActionText", "Text") c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) c.mapping("ActionData", "Text") prep.back("Back", None, active=0) prep.next("Next", None, active=0) c=prep.cancel("Cancel", None) c.event("SpawnDialog", "CancelDlg") ##################################################################### # Feature (Python directory) selection seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") seldlg.title("Select Python Installations") seldlg.text("Hint", 15, 30, 300, 20, 3, "Select the Python locations where %s should be installed." % self.distribution.get_fullname()) seldlg.back("< Back", None, active=0) c = seldlg.next("Next >", "Cancel") order = 1 c.event("[TARGETDIR]", "[SourceDir]", ordering=order) for version in self.versions + [self.other_version]: order += 1 c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, "FEATURE_SELECTED AND &Python%s=3" % version, ordering=order) c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) c.event("EndDialog", "Return", ordering=order + 2) c = seldlg.cancel("Cancel", "Features") c.event("SpawnDialog", "CancelDlg") c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, "FEATURE", None, "PathEdit", None) c.event("[FEATURE_SELECTED]", "1") ver = self.other_version install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver c = seldlg.text("Other", 15, 200, 300, 15, 3, "Provide an alternate Python location") c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) c = 
seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, "TARGETDIR" + ver, None, "Next", None) c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) ##################################################################### # Disk cost cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, "OK", "OK", "OK", bitmap=False) cost.text("Title", 15, 6, 200, 15, 0x30003, "{\DlgFontBold8}Disk Space Requirements") cost.text("Description", 20, 20, 280, 20, 0x30003, "The disk space required for the installation of the selected features.") cost.text("Text", 20, 53, 330, 60, 3, "The highlighted volumes (if any) do not have enough disk space " "available for the currently selected features. You can either " "remove some files from the highlighted volumes, or choose to " "install less features onto local drive(s), or select different " "destination drive(s).") cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, None, "{120}{70}{70}{70}{70}", None, None) cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") ##################################################################### # WhichUsers Dialog. Only available on NT, and for privileged users. # This must be run before FindRelatedProducts, because that will # take into account whether the previous installation was per-user # or per-machine. We currently don't support going back to this # dialog after "Next" was selected; to support this, we would need to # find how to reset the ALLUSERS property, and how to re-run # FindRelatedProducts. # On Windows9x, the ALLUSERS property is ignored on the command line # and in the Property table, but installer fails according to the documentation # if a dialog attempts to set ALLUSERS. 
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, "AdminInstall", "Next", "Cancel") whichusers.title("Select whether to install [ProductName] for all users of this computer.") # A radio group with two options: allusers, justme g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, "WhichUsers", "", "Next") g.add("ALL", 0, 5, 150, 20, "Install for all users") g.add("JUSTME", 0, 25, 150, 20, "Install just for me") whichusers.back("Back", None, active=0) c = whichusers.next("Next >", "Cancel") c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) c.event("EndDialog", "Return", ordering = 2) c = whichusers.cancel("Cancel", "AdminInstall") c.event("SpawnDialog", "CancelDlg") ##################################################################### # Installation Progress dialog (modeless) progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel", bitmap=False) progress.text("Title", 20, 15, 200, 15, 0x30003, "{\DlgFontBold8}[Progress1] [ProductName]") progress.text("Text", 35, 65, 300, 30, 3, "Please wait while the Installer [Progress2] [ProductName]. 
" "This may take several minutes.") progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") c.mapping("ActionText", "Text") #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) #c.mapping("ActionData", "Text") c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, None, "Progress done", None, None) c.mapping("SetProgress", "Progress") progress.back("< Back", "Next", active=False) progress.next("Next >", "Cancel", active=False) progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") ################################################################### # Maintenance type: repair/uninstall maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") maint.title("Welcome to the [ProductName] Setup Wizard") maint.text("BodyText", 15, 63, 330, 42, 3, "Select whether you want to repair or remove [ProductName].") g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, "MaintenanceForm_Action", "", "Next") #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") maint.back("< Back", None, active=False) c=maint.next("Finish", "Cancel") # Change installation: Change progress dialog to "Change", then ask # for feature selection #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) # Reinstall: Change progress dialog to "Repair", then invoke reinstall # Also set list of reinstalled features to "ALL" c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8) # Uninstall: Change progress to "Remove", then invoke 
uninstall # Also set list of removed features to "ALL" c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14) # Close dialog when maintenance action scheduled c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") def get_installer_filename(self, fullname): # Factored out to allow overriding in subclasses if self.target_version: base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, self.target_version) else: base_name = "%s.%s.msi" % (fullname, self.plat_name) installer_name = os.path.join(self.dist_dir, base_name) return installer_name
lgpl-3.0
zadgroup/edx-platform
common/test/acceptance/fixtures/edxnotes.py
86
1723
""" Tools for creating edxnotes content fixture data. """ import json import factory import requests from . import EDXNOTES_STUB_URL class Range(factory.Factory): FACTORY_FOR = dict start = "/div[1]/p[1]" end = "/div[1]/p[1]" startOffset = 0 endOffset = 8 class Note(factory.Factory): FACTORY_FOR = dict user = "dummy-user" usage_id = "dummy-usage-id" course_id = "dummy-course-id" text = "dummy note text" quote = "dummy note quote" ranges = [Range()] class EdxNotesFixtureError(Exception): """ Error occurred while installing a edxnote fixture. """ pass class EdxNotesFixture(object): notes = [] def create_notes(self, notes_list): self.notes = notes_list return self def install(self): """ Push the data to the stub EdxNotes service. """ response = requests.post( '{}/create_notes'.format(EDXNOTES_STUB_URL), data=json.dumps(self.notes) ) if not response.ok: raise EdxNotesFixtureError( "Could not create notes {0}. Status was {1}".format( json.dumps(self.notes), response.status_code ) ) return self def cleanup(self): """ Cleanup the stub EdxNotes service. """ self.notes = [] response = requests.put('{}/cleanup'.format(EDXNOTES_STUB_URL)) if not response.ok: raise EdxNotesFixtureError( "Could not cleanup EdxNotes service {0}. Status was {1}".format( json.dumps(self.notes), response.status_code ) ) return self
agpl-3.0
popazerty/dvbapp2-gui
lib/python/Plugins/SystemPlugins/SoftwareManager/BackupRestore.py
1
12814
from Screens.Screen import Screen from Screens.MessageBox import MessageBox from Screens.Console import Console from Components.ActionMap import ActionMap, NumberActionMap from Components.Pixmap import Pixmap from Components.Label import Label from Components.Sources.StaticText import StaticText from Components.MenuList import MenuList from Components.config import getConfigListEntry, configfile, ConfigSelection, ConfigSubsection, ConfigText, ConfigLocations from Components.config import config from Components.ConfigList import ConfigList,ConfigListScreen from Components.FileList import MultiFileSelectList from Plugins.Plugin import PluginDescriptor from enigma import eTimer, eEnv from Tools.Directories import * from os import popen, path, makedirs, listdir, access, stat, rename, remove, W_OK, R_OK from time import gmtime, strftime, localtime from datetime import date config.plugins.configurationbackup = ConfigSubsection() config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False) config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname']) def getBackupPath(): backuppath = config.plugins.configurationbackup.backuplocation.getValue() if backuppath.endswith('/'): return backuppath + 'backup' else: return backuppath + '/backup' def getBackupFilename(): return "enigma2settingsbackup.tar.gz" class BackupScreen(Screen, ConfigListScreen): skin = """ <screen position="135,144" size="350,310" title="Backup is running" > <widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" /> </screen>""" def __init__(self, session, runBackup = False): Screen.__init__(self, session) self.session = session self.runBackup = runBackup self["actions"] = 
ActionMap(["WizardActions", "DirectionActions"], { "ok": self.close, "back": self.close, "cancel": self.close, }, -1) self.finished_cb = None self.backuppath = getBackupPath() self.backupfile = getBackupFilename() self.fullbackupfilename = self.backuppath + "/" + self.backupfile self.list = [] ConfigListScreen.__init__(self, self.list) self.onLayoutFinish.append(self.layoutFinished) if self.runBackup: self.onShown.append(self.doBackup) def layoutFinished(self): self.setWindowTitle() def setWindowTitle(self): self.setTitle(_("Backup is running...")) def doBackup(self): configfile.save() try: if (path.exists(self.backuppath) == False): makedirs(self.backuppath) self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.getValue() ) if path.exists(self.fullbackupfilename): dt = str(date.fromtimestamp(stat(self.fullbackupfilename).st_ctime)) self.newfilename = self.backuppath + "/" + dt + '-' + self.backupfile if path.exists(self.newfilename): remove(self.newfilename) rename(self.fullbackupfilename,self.newfilename) if self.finished_cb: self.session.openWithCallback(self.finished_cb, Console, title = _("Backup is running..."), cmdlist = ["tar -czvf " + self.fullbackupfilename + " " + self.backupdirs],finishedCallback = self.backupFinishedCB,closeOnSuccess = True) else: self.session.open(Console, title = _("Backup is running..."), cmdlist = ["tar -czvf " + self.fullbackupfilename + " " + self.backupdirs],finishedCallback = self.backupFinishedCB, closeOnSuccess = True) except OSError: if self.finished_cb: self.session.openWithCallback(self.finished_cb, MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 ) else: self.session.openWithCallback(self.backupErrorCB,MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 ) def backupFinishedCB(self,retval = None): self.close(True) def 
backupErrorCB(self,retval = None): self.close(False) def runAsync(self, finished_cb): self.finished_cb = finished_cb self.doBackup() class BackupSelection(Screen): skin = """ <screen name="BackupSelection" position="center,center" size="560,400" title="Select files/folders to backup"> <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget name="checkList" position="5,50" size="550,250" transparent="1" scrollbarMode="showOnDemand" /> </screen>""" def __init__(self, session): Screen.__init__(self, session) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("Save")) self["key_yellow"] = StaticText() self.selectedFiles = config.plugins.configurationbackup.backupdirs.getValue() defaultDir = '/' inhibitDirs = ["/bin", "/boot", "/dev", "/autofs", "/lib", "/proc", "/sbin", "/sys", "/hdd", "/tmp", "/mnt", "/media"] self.filelist = MultiFileSelectList(self.selectedFiles, defaultDir, inhibitDirs = inhibitDirs ) self["checkList"] = self.filelist self["actions"] = ActionMap(["DirectionActions", "OkCancelActions", "ShortcutActions"], { "cancel": self.exit, "red": self.exit, "yellow": self.changeSelectionState, "green": self.saveSelection, "ok": self.okClicked, "left": self.left, "right": self.right, "down": self.down, 
"up": self.up }, -1) if not self.selectionChanged in self["checkList"].onSelectionChanged: self["checkList"].onSelectionChanged.append(self.selectionChanged) self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): idx = 0 self["checkList"].moveToIndex(idx) self.setWindowTitle() self.selectionChanged() def setWindowTitle(self): self.setTitle(_("Select files/folders to backup")) def selectionChanged(self): current = self["checkList"].getCurrent()[0] if current[2] is True: self["key_yellow"].setText(_("Deselect")) else: self["key_yellow"].setText(_("Select")) def up(self): self["checkList"].up() def down(self): self["checkList"].down() def left(self): self["checkList"].pageUp() def right(self): self["checkList"].pageDown() def changeSelectionState(self): self["checkList"].changeSelectionState() self.selectedFiles = self["checkList"].getSelectedList() def saveSelection(self): self.selectedFiles = self["checkList"].getSelectedList() config.plugins.configurationbackup.backupdirs.value = self.selectedFiles config.plugins.configurationbackup.backupdirs.save() config.plugins.configurationbackup.save() config.save() self.close(None) def exit(self): self.close(None) def okClicked(self): if self.filelist.canDescent(): self.filelist.descent() class RestoreMenu(Screen): skin = """ <screen name="RestoreMenu" position="center,center" size="560,400" title="Restore backups" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" 
transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget name="filelist" position="5,50" size="550,230" scrollbarMode="showOnDemand" /> </screen>""" def __init__(self, session, plugin_path): Screen.__init__(self, session) self.skin_path = plugin_path self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("Restore")) self["key_yellow"] = StaticText(_("Delete")) self.sel = [] self.val = [] self.entry = False self.exe = False self.path = "" self["actions"] = NumberActionMap(["SetupActions"], { "ok": self.KeyOk, "cancel": self.keyCancel }, -1) self["shortcuts"] = ActionMap(["ShortcutActions"], { "red": self.keyCancel, "green": self.KeyOk, "yellow": self.deleteFile, }) self.flist = [] self["filelist"] = MenuList(self.flist) self.fill_list() self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setWindowTitle() def setWindowTitle(self): self.setTitle(_("Restore backups")) def fill_list(self): self.flist = [] self.path = getBackupPath() if (path.exists(self.path) == False): makedirs(self.path) for file in listdir(self.path): if (file.endswith(".tar.gz")): self.flist.append((file)) self.entry = True self.flist.sort(reverse=True) self["filelist"].l.setList(self.flist) def KeyOk(self): if (self.exe == False) and (self.entry == True): self.sel = self["filelist"].getCurrent() if self.sel: self.val = self.path + "/" + self.sel self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore\nthe following backup:\n%s\nYour receiver will restart after the backup has been restored!") % (self.sel)) def keyCancel(self): self.close() def startRestore(self, ret = False): if (ret == True): self.exe = True self.session.open(Console, title = _("Restoring..."), cmdlist = ["tar -xzvf " + self.path + "/" + self.sel + " -C /", "killall -9 enigma2"]) def 
deleteFile(self): if (self.exe == False) and (self.entry == True): self.sel = self["filelist"].getCurrent() if self.sel: self.val = self.path + "/" + self.sel self.session.openWithCallback(self.startDelete, MessageBox, _("Are you sure you want to delete\nthe following backup:\n") + self.sel) def startDelete(self, ret = False): if (ret == True): self.exe = True print "removing:",self.val if (path.exists(self.val) == True): remove(self.val) self.exe = False self.fill_list() class RestoreScreen(Screen, ConfigListScreen): skin = """ <screen position="135,144" size="350,310" title="Restore is running..." > <widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" /> </screen>""" def __init__(self, session, runRestore = False): Screen.__init__(self, session) self.session = session self.runRestore = runRestore self["actions"] = ActionMap(["WizardActions", "DirectionActions"], { "ok": self.close, "back": self.close, "cancel": self.close, }, -1) self.finished_cb = None self.backuppath = getBackupPath() self.backupfile = getBackupFilename() self.fullbackupfilename = self.backuppath + "/" + self.backupfile self.list = [] ConfigListScreen.__init__(self, self.list) self.onLayoutFinish.append(self.layoutFinished) if self.runRestore: self.onShown.append(self.doRestore) def layoutFinished(self): self.setWindowTitle() def setWindowTitle(self): self.setTitle(_("Restoring...")) def doRestore(self): if path.exists("/proc/stb/vmpeg/0/dst_width"): restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "echo 0 > /proc/stb/vmpeg/0/dst_height", "echo 0 > /proc/stb/vmpeg/0/dst_left", "echo 0 > /proc/stb/vmpeg/0/dst_top", "echo 0 > /proc/stb/vmpeg/0/dst_width", "killall -9 enigma2"] else: restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "killall -9 enigma2"] if self.finished_cb: self.session.openWithCallback(self.finished_cb, Console, title = _("Restoring..."), cmdlist = restorecmdlist) else: self.session.open(Console, 
title = _("Restoring..."), cmdlist = restorecmdlist) def backupFinishedCB(self,retval = None): self.close(True) def backupErrorCB(self,retval = None): self.close(False) def runAsync(self, finished_cb): self.finished_cb = finished_cb self.doRestore()
gpl-2.0
ptoraskar/django
tests/model_meta/test_legacy.py
199
7556
import warnings from django import test from django.contrib.contenttypes.fields import GenericRelation from django.core.exceptions import FieldDoesNotExist from django.db.models.fields import CharField, related from django.utils.deprecation import RemovedInDjango110Warning from .models import BasePerson, Person from .results import TEST_RESULTS class OptionsBaseTests(test.SimpleTestCase): def _map_related_query_names(self, res): return tuple((o.field.related_query_name(), m) for o, m in res) def _map_names(self, res): return tuple((f.name, m) for f, m in res) class M2MTests(OptionsBaseTests): def test_many_to_many_with_model(self): for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items(): with warnings.catch_warnings(record=True) as warning: warnings.simplefilter("always") models = [model for field, model in model._meta.get_m2m_with_model()] self.assertEqual([RemovedInDjango110Warning], [w.message.__class__ for w in warning]) self.assertEqual(models, expected_result) @test.ignore_warnings(category=RemovedInDjango110Warning) class RelatedObjectsTests(OptionsBaseTests): key_name = lambda self, r: r[0] def test_related_objects(self): result_key = 'get_all_related_objects_with_model_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model() self.assertEqual(self._map_related_query_names(objects), expected) def test_related_objects_local(self): result_key = 'get_all_related_objects_with_model_local_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model(local_only=True) self.assertEqual(self._map_related_query_names(objects), expected) def test_related_objects_include_hidden(self): result_key = 'get_all_related_objects_with_model_hidden_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model(include_hidden=True) self.assertEqual( 
sorted(self._map_names(objects), key=self.key_name), sorted(expected, key=self.key_name) ) def test_related_objects_include_hidden_local_only(self): result_key = 'get_all_related_objects_with_model_hidden_local_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model( include_hidden=True, local_only=True) self.assertEqual( sorted(self._map_names(objects), key=self.key_name), sorted(expected, key=self.key_name) ) def test_related_objects_proxy(self): result_key = 'get_all_related_objects_with_model_proxy_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model( include_proxy_eq=True) self.assertEqual(self._map_related_query_names(objects), expected) def test_related_objects_proxy_hidden(self): result_key = 'get_all_related_objects_with_model_proxy_hidden_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_objects_with_model( include_proxy_eq=True, include_hidden=True) self.assertEqual( sorted(self._map_names(objects), key=self.key_name), sorted(expected, key=self.key_name) ) @test.ignore_warnings(category=RemovedInDjango110Warning) class RelatedM2MTests(OptionsBaseTests): def test_related_m2m_with_model(self): result_key = 'get_all_related_many_to_many_with_model_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_m2m_objects_with_model() self.assertEqual(self._map_related_query_names(objects), expected) def test_related_m2m_local_only(self): result_key = 'get_all_related_many_to_many_local_legacy' for model, expected in TEST_RESULTS[result_key].items(): objects = model._meta.get_all_related_many_to_many_objects(local_only=True) self.assertEqual([o.field.related_query_name() for o in objects], expected) def test_related_m2m_asymmetrical(self): m2m = Person._meta.many_to_many self.assertTrue('following_base' in [f.attname for f in m2m]) 
related_m2m = Person._meta.get_all_related_many_to_many_objects() self.assertTrue('followers_base' in [o.field.related_query_name() for o in related_m2m]) def test_related_m2m_symmetrical(self): m2m = Person._meta.many_to_many self.assertTrue('friends_base' in [f.attname for f in m2m]) related_m2m = Person._meta.get_all_related_many_to_many_objects() self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m]) @test.ignore_warnings(category=RemovedInDjango110Warning) class GetFieldByNameTests(OptionsBaseTests): def test_get_data_field(self): field_info = Person._meta.get_field_by_name('data_abstract') self.assertEqual(field_info[1:], (BasePerson, True, False)) self.assertIsInstance(field_info[0], CharField) def test_get_m2m_field(self): field_info = Person._meta.get_field_by_name('m2m_base') self.assertEqual(field_info[1:], (BasePerson, True, True)) self.assertIsInstance(field_info[0], related.ManyToManyField) def test_get_related_object(self): field_info = Person._meta.get_field_by_name('relating_baseperson') self.assertEqual(field_info[1:], (BasePerson, False, False)) self.assertTrue(field_info[0].auto_created) def test_get_related_m2m(self): field_info = Person._meta.get_field_by_name('relating_people') self.assertEqual(field_info[1:], (None, False, True)) self.assertTrue(field_info[0].auto_created) def test_get_generic_relation(self): field_info = Person._meta.get_field_by_name('generic_relation_base') self.assertEqual(field_info[1:], (None, True, False)) self.assertIsInstance(field_info[0], GenericRelation) def test_get_m2m_field_invalid(self): with warnings.catch_warnings(record=True) as warning: warnings.simplefilter("always") self.assertRaises( FieldDoesNotExist, Person._meta.get_field, **{'field_name': 'm2m_base', 'many_to_many': False} ) self.assertEqual(Person._meta.get_field('m2m_base', many_to_many=True).name, 'm2m_base') # 2 RemovedInDjango110Warning messages should be raised, one for each call of get_field() # with 
the 'many_to_many' argument. self.assertEqual( [RemovedInDjango110Warning, RemovedInDjango110Warning], [w.message.__class__ for w in warning] ) @test.ignore_warnings(category=RemovedInDjango110Warning) class GetAllFieldNamesTestCase(OptionsBaseTests): def test_get_all_field_names(self): for model, expected_names in TEST_RESULTS['get_all_field_names'].items(): objects = model._meta.get_all_field_names() self.assertEqual(sorted(map(str, objects)), sorted(expected_names))
bsd-3-clause
heliortf/Google-Forum-Engine
lib/werkzeug/local.py
97
14123
# -*- coding: utf-8 -*- """ werkzeug.local ~~~~~~~~~~~~~~ This module implements context-local objects. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from functools import update_wrapper from werkzeug.wsgi import ClosingIterator from werkzeug._compat import PY2, implements_bool # since each thread has its own greenlet we can just use those as identifiers # for the context. If greenlets are not available we fall back to the # current thread ident depending on where it is. try: from greenlet import getcurrent as get_ident except ImportError: try: from thread import get_ident except ImportError: from _thread import get_ident def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`LocalStack` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. 
versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return LocalProxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it returns a proxy that resolves to the topmost item on the stack. .. 
versionadded:: 0.6.1 """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return LocalProxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Everytime the manager cleans up it, will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. .. versionchanged:: 0.6.1 Instead of a manager the :func:`release_local` function can be used as well. .. versionchanged:: 0.7 `ident_func` was added. 
""" def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. .. versionchanged:: 0.7 You can pass a different ident function to the local manager that will then be propagated to all the locals passed to the constructor. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def make_middleware(self, app): """Wrap a WSGI application so that cleaning up happens after request end. """ def application(environ, start_response): return ClosingIterator(app(environ, start_response), self.cleanup) return application def middleware(self, func): """Like `make_middleware` but for decorating functions. Example usage:: @manager.middleware def application(environ, start_response): ... The difference to `make_middleware` is that the function passed will have all the arguments copied from the inner application (name, docstring, module). """ return update_wrapper(self.make_middleware(func), func) def __repr__(self): return '<%s storages: %d>' % ( self.__class__.__name__, len(self.locals) ) @implements_bool class LocalProxy(object): """Acts as a proxy for a werkzeug local. Forwards all operations to a proxied object. The only operations not supported for forwarding are right handed operands and any kind of assignment. 
Example usage:: from werkzeug.local import Local l = Local() # these are proxies request = l('request') user = l('user') from werkzeug.local import LocalStack _response_local = LocalStack() # this is a proxy response = _response_local() Whenever something is bound to l.user / l.request the proxy objects will forward all operations. If no object is bound a :exc:`RuntimeError` will be raised. To create proxies to :class:`Local` or :class:`LocalStack` objects, call the object as shown above. If you want to have a proxy to an object looked up by a function, you can (as of Werkzeug 0.6.1) pass a function to the :class:`LocalProxy` constructor:: session = LocalProxy(lambda: get_current_request().session) .. versionchanged:: 0.6.1 The class can be instanciated with a callable as well now. """ __slots__ = ('__local', '__dict__', '__name__') def __init__(self, local, name=None): object.__setattr__(self, '_LocalProxy__local', local) object.__setattr__(self, '__name__', name) def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. 
""" if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: return '<%s unbound>' % self.__class__.__name__ return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: return False def __unicode__(self): try: return unicode(self._get_current_object()) # noqa except RuntimeError: return repr(self) def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] if PY2: __getslice__ = lambda x, i, j: x._get_current_object()[i:j] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) __str__ = lambda x: str(x._get_current_object()) __lt__ = lambda x, o: x._get_current_object() < o __le__ = lambda x, o: x._get_current_object() <= o __eq__ = lambda x, o: x._get_current_object() == o __ne__ = lambda x, o: x._get_current_object() != o __gt__ = lambda x, o: x._get_current_object() > o __ge__ = lambda x, o: x._get_current_object() >= o __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa __hash__ = lambda x: hash(x._get_current_object()) __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) __len__ = lambda x: len(x._get_current_object()) 
__getitem__ = lambda x, i: x._get_current_object()[i] __iter__ = lambda x: iter(x._get_current_object()) __contains__ = lambda x, i: i in x._get_current_object() __add__ = lambda x, o: x._get_current_object() + o __sub__ = lambda x, o: x._get_current_object() - o __mul__ = lambda x, o: x._get_current_object() * o __floordiv__ = lambda x, o: x._get_current_object() // o __mod__ = lambda x, o: x._get_current_object() % o __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) __pow__ = lambda x, o: x._get_current_object() ** o __lshift__ = lambda x, o: x._get_current_object() << o __rshift__ = lambda x, o: x._get_current_object() >> o __and__ = lambda x, o: x._get_current_object() & o __xor__ = lambda x, o: x._get_current_object() ^ o __or__ = lambda x, o: x._get_current_object() | o __div__ = lambda x, o: x._get_current_object().__div__(o) __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) __neg__ = lambda x: -(x._get_current_object()) __pos__ = lambda x: +(x._get_current_object()) __abs__ = lambda x: abs(x._get_current_object()) __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) __long__ = lambda x: long(x._get_current_object()) # noqa __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) __enter__ = lambda x: x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) __radd__ = lambda x, o: o + x._get_current_object() __rsub__ = lambda x, o: o - x._get_current_object() __rmul__ = lambda x, o: o * x._get_current_object() __rdiv__ = lambda x, o: o / x._get_current_object() if PY2: __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o) else: __rtruediv__ = 
__rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object() __rmod__ = lambda x, o: o % x._get_current_object() __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
apache-2.0
quinot/ansible
lib/ansible/plugins/callback/yaml.py
23
4317
# (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' callback: yaml type: stdout short_description: yaml-ized Ansible screen output version_added: 2.5 description: - Ansible output that can be quite a bit easier to read than the default JSON formatting. extends_documentation_fragment: - default_callback requirements: - set as stdout in configuration ''' import yaml import json import re import string import sys from ansible.plugins.callback import CallbackBase, strip_internal_keys from ansible.plugins.callback.default import CallbackModule as Default from ansible.parsing.yaml.dumper import AnsibleDumper # from http://stackoverflow.com/a/15423007/115478 def should_use_block(value): """Returns true if string should be in block format""" for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029": if c in value: return True return False def my_represent_scalar(self, tag, value, style=None): """Uses block style for multi-line strings""" if style is None: if should_use_block(value): style = '|' # we care more about readable than accuracy, so... # ...no trailing space value = value.rstrip() # ...and non-printable characters value = filter(lambda x: x in string.printable, value) # ...tabs prevent blocks from expanding value = value.expandtabs() # ...and odd bits of whitespace value = re.sub(r'[\x0b\x0c\r]', '', value) # ...as does trailing space value = re.sub(r' +\n', '\n', value) else: style = self.default_style node = yaml.representer.ScalarNode(tag, value, style=style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node return node class CallbackModule(Default): """ Variation of the Default output which uses nicely readable YAML instead of JSON for printing results. 
""" CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'yaml' def __init__(self): super(CallbackModule, self).__init__() yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")) # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. abridged_result = strip_internal_keys(result) # remove invocation unless specifically wanting it if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: del abridged_result['invocation'] # remove diff information from screen output if self._display.verbosity < 3 and 'diff' in result: del abridged_result['diff'] # remove exception from screen output if 'exception' in abridged_result: del abridged_result['exception'] dumped = '' # put changed and skipped into a header line if 'changed' in abridged_result: dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' del abridged_result['changed'] if 'skipped' in abridged_result: dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' del abridged_result['skipped'] # if we already have stdout, we don't need stdout_lines if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: abridged_result['stdout_lines'] = '<omitted>' if abridged_result: dumped += '\n' dumped += yaml.dump(abridged_result, width=1000, Dumper=AnsibleDumper, default_flow_style=False) # indent by a couple of spaces dumped = '\n '.join(dumped.split('\n')).rstrip() return dumped
gpl-3.0
efiring/scipy
scipy/misc/tests/test_common.py
20
4028
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_equal) from scipy.misc import pade, logsumexp, face, ascent from scipy._lib._version import NumpyVersion def test_pade_trivial(): nump, denomp = pade([1.0], 0) assert_array_equal(nump.c, [1.0]) assert_array_equal(denomp.c, [1.0]) def test_pade_4term_exp(): # First four Taylor coefficients of exp(x). # Unlike poly1d, the first array element is the zero-order term. an = [1.0, 1.0, 0.5, 1.0/6] nump, denomp = pade(an, 0) assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0]) assert_array_almost_equal(denomp.c, [1.0]) nump, denomp = pade(an, 1) assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0]) assert_array_almost_equal(denomp.c, [-1.0/3, 1.0]) nump, denomp = pade(an, 2) assert_array_almost_equal(nump.c, [1.0/3, 1.0]) assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0]) nump, denomp = pade(an, 3) assert_array_almost_equal(nump.c, [1.0]) assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0]) def test_logsumexp(): # Test whether logsumexp() function correctly handles large inputs. 
a = np.arange(200) desired = np.log(np.sum(np.exp(a))) assert_almost_equal(logsumexp(a), desired) # Now test with large numbers b = [1000, 1000] desired = 1000.0 + np.log(2.0) assert_almost_equal(logsumexp(b), desired) n = 1000 b = np.ones(n) * 10000 desired = 10000.0 + np.log(n) assert_almost_equal(logsumexp(b), desired) x = np.array([1e-40] * 1000000) logx = np.log(x) X = np.vstack([x, x]) logX = np.vstack([logx, logx]) assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum()) assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) # Handling special values properly assert_equal(logsumexp(np.inf), np.inf) assert_equal(logsumexp(-np.inf), -np.inf) assert_equal(logsumexp(np.nan), np.nan) assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf) # Handling an array with different magnitudes on the axes assert_array_almost_equal(logsumexp([[1e10, 1e-10], [-1e10, -np.inf]], axis=-1), [1e10, -1e10]) # Test keeping dimensions assert_array_almost_equal(logsumexp([[1e10, 1e-10], [-1e10, -np.inf]], axis=-1, keepdims=True), [[1e10], [-1e10]]) # Test multiple axes if NumpyVersion(np.__version__) >= NumpyVersion('1.7.0'): assert_array_almost_equal(logsumexp([[1e10, 1e-10], [-1e10, -np.inf]], axis=(-1,-2)), 1e10) def test_logsumexp_b(): a = np.arange(200) b = np.arange(200, 0, -1) desired = np.log(np.sum(b*np.exp(a))) assert_almost_equal(logsumexp(a, b=b), desired) a = [1000, 1000] b = [1.2, 1.2] desired = 1000 + np.log(2 * 1.2) assert_almost_equal(logsumexp(a, b=b), desired) x = np.array([1e-40] * 100000) b = np.linspace(1, 1000, 1e5) logx = np.log(x) X = np.vstack((x, x)) logX = np.vstack((logx, logx)) B = np.vstack((b, b)) assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum()) assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)), (B * X).sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)), (B * X).sum(axis=1)) def test_face(): 
assert_equal(face().shape, (768, 1024, 3)) def test_ascent(): assert_equal(ascent().shape, (512, 512))
bsd-3-clause
2014c2g5/2014c2
exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/suite.py
748
9715
"""TestSuite""" import sys from . import case from . import util __unittest = True def _call_if_exists(parent, attr): func = getattr(parent, attr, lambda: None) func() class BaseTestSuite(object): """A simple test suite that doesn't provide class or module shared fixtures. """ def __init__(self, tests=()): self._tests = [] self.addTests(tests) def __repr__(self): return "<%s tests=%s>" % (util.strclass(self.__class__), list(self)) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return list(self) == list(other) def __ne__(self, other): return not self == other def __iter__(self): return iter(self._tests) def countTestCases(self): cases = 0 for test in self: cases += test.countTestCases() return cases def addTest(self, test): # sanity checks if not callable(test): raise TypeError("{} is not callable".format(repr(test))) if isinstance(test, type) and issubclass(test, (case.TestCase, TestSuite)): raise TypeError("TestCases and TestSuites must be instantiated " "before passing them to addTest()") self._tests.append(test) def addTests(self, tests): if isinstance(tests, str): raise TypeError("tests must be an iterable of tests, not a string") for test in tests: self.addTest(test) def run(self, result): for test in self: if result.shouldStop: break test(result) return result def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the tests without collecting errors in a TestResult""" for test in self: test.debug() class TestSuite(BaseTestSuite): """A test suite is a composite test consisting of a number of TestCases. For use, create an instance of TestSuite, then add test case instances. When all tests have been added, the suite can be passed to a test runner, such as TextTestRunner. It will run the individual test cases in the order in which they were added, aggregating the results. When subclassing, do not forget to call the base class constructor. 
""" def run(self, result, debug=False): topLevel = False if getattr(result, '_testRunEntered', False) is False: result._testRunEntered = topLevel = True for test in self: if result.shouldStop: break if _isnotsuite(test): self._tearDownPreviousClass(test, result) self._handleModuleFixture(test, result) self._handleClassSetUp(test, result) result._previousTestClass = test.__class__ if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)): continue if not debug: test(result) else: test.debug() if topLevel: self._tearDownPreviousClass(None, result) self._handleModuleTearDown(result) result._testRunEntered = False return result def debug(self): """Run the tests without collecting errors in a TestResult""" debug = _DebugResult() self.run(debug, True) ################################ def _handleClassSetUp(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if result._moduleSetUpFailed: return if getattr(currentClass, "__unittest_skip__", False): return try: currentClass._classSetupFailed = False except TypeError: # test may actually be a function # so its class will be a builtin-type pass setUpClass = getattr(currentClass, 'setUpClass', None) if setUpClass is not None: _call_if_exists(result, '_setupStdout') try: setUpClass() except Exception as e: if isinstance(result, _DebugResult): raise currentClass._classSetupFailed = True className = util.strclass(currentClass) errorName = 'setUpClass (%s)' % className self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _get_previous_module(self, result): previousModule = None previousClass = getattr(result, '_previousTestClass', None) if previousClass is not None: previousModule = previousClass.__module__ return previousModule def _handleModuleFixture(self, test, result): previousModule = self._get_previous_module(result) 
currentModule = test.__class__.__module__ if currentModule == previousModule: return self._handleModuleTearDown(result) result._moduleSetUpFailed = False try: module = sys.modules[currentModule] except KeyError: return setUpModule = getattr(module, 'setUpModule', None) if setUpModule is not None: _call_if_exists(result, '_setupStdout') try: setUpModule() except Exception as e: if isinstance(result, _DebugResult): raise result._moduleSetUpFailed = True errorName = 'setUpModule (%s)' % currentModule self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _addClassOrModuleLevelException(self, result, exception, errorName): error = _ErrorHolder(errorName) addSkip = getattr(result, 'addSkip', None) if addSkip is not None and isinstance(exception, case.SkipTest): addSkip(error, str(exception)) else: result.addError(error, sys.exc_info()) def _handleModuleTearDown(self, result): previousModule = self._get_previous_module(result) if previousModule is None: return if result._moduleSetUpFailed: return try: module = sys.modules[previousModule] except KeyError: return tearDownModule = getattr(module, 'tearDownModule', None) if tearDownModule is not None: _call_if_exists(result, '_setupStdout') try: tearDownModule() except Exception as e: if isinstance(result, _DebugResult): raise errorName = 'tearDownModule (%s)' % previousModule self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _tearDownPreviousClass(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if getattr(previousClass, '_classSetupFailed', False): return if getattr(result, '_moduleSetUpFailed', False): return if getattr(previousClass, "__unittest_skip__", False): return tearDownClass = getattr(previousClass, 'tearDownClass', None) if tearDownClass is not None: _call_if_exists(result, '_setupStdout') 
try: tearDownClass() except Exception as e: if isinstance(result, _DebugResult): raise className = util.strclass(previousClass) errorName = 'tearDownClass (%s)' % className self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') class _ErrorHolder(object): """ Placeholder for a TestCase inside a result. As far as a TestResult is concerned, this looks exactly like a unit test. Used to insert arbitrary errors into a test suite run. """ # Inspired by the ErrorHolder from Twisted: # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py # attribute used by TestResult._exc_info_to_string failureException = None def __init__(self, description): self.description = description def id(self): return self.description def shortDescription(self): return None def __repr__(self): return "<ErrorHolder description=%r>" % (self.description,) def __str__(self): return self.id() def run(self, result): # could call result.addError(...) - but this test-like object # shouldn't be run anyway pass def __call__(self, result): return self.run(result) def countTestCases(self): return 0 def _isnotsuite(test): "A crude way to tell apart testcases and suites with duck-typing" try: iter(test) except TypeError: return True return False class _DebugResult(object): "Used by the TestSuite to hold previous class when running in debug." _previousTestClass = None _moduleSetUpFailed = False shouldStop = False
gpl-2.0
simobasso/ansible
lib/ansible/plugins/lookup/csvfile.py
37
2629
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import codecs import csv from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def read_csv(self, filename, key, delimiter, dflt=None, col=1): try: f = codecs.open(filename, 'r', encoding='utf-8') creader = csv.reader(f, delimiter=str(delimiter)) for row in creader: if row[0] == key: return row[int(col)] except Exception as e: raise AnsibleError("csvfile: %s" % str(e)) return dflt def run(self, terms, variables=None, **kwargs): basedir = self.get_basedir(variables) ret = [] for term in terms: params = term.split() key = params[0] paramvals = { 'file' : 'ansible.csv', 'default' : None, 'delimiter' : "TAB", 'col' : "1", # column to return } # parameters specified? 
try: for param in params[1:]: name, value = param.split('=') assert(name in paramvals) paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) if paramvals['delimiter'] == 'TAB': paramvals['delimiter'] = "\t" lookupfile = self._loader.path_dwim_relative(basedir, 'files', paramvals['file']) var = self.read_csv(lookupfile, key, str(paramvals['delimiter']), paramvals['default'], paramvals['col']) if var is not None: if type(var) is list: for v in var: ret.append(v) else: ret.append(var) return ret
gpl-3.0
RoyalTS/econ-python-environment
.mywaflib/waflib/Tools/c_osx.py
10
5517
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2008-2010 """ MacOSX related tools """ import os, shutil, sys, platform from waflib import TaskGen, Task, Build, Options, Utils, Errors from waflib.TaskGen import taskgen_method, feature, after_method, before_method app_info = ''' <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd"> <plist version="0.9"> <dict> <key>CFBundlePackageType</key> <string>APPL</string> <key>CFBundleGetInfoString</key> <string>Created by Waf</string> <key>CFBundleSignature</key> <string>????</string> <key>NOTE</key> <string>THIS IS A GENERATED FILE, DO NOT MODIFY</string> <key>CFBundleExecutable</key> <string>%s</string> </dict> </plist> ''' """ plist template """ @feature('c', 'cxx') def set_macosx_deployment_target(self): """ see WAF issue 285 and also and also http://trac.macports.org/ticket/17059 """ if self.env['MACOSX_DEPLOYMENT_TARGET']: os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET'] elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ: if Utils.unversioned_sys_platform() == 'darwin': os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2]) @taskgen_method def create_bundle_dirs(self, name, out): """ Create bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp` """ bld = self.bld dir = out.parent.find_or_declare(name) dir.mkdir() macos = dir.find_or_declare(['Contents', 'MacOS']) macos.mkdir() return dir def bundle_name_for_output(out): name = out.name k = name.rfind('.') if k >= 0: name = name[:k] + '.app' else: name = name + '.app' return name @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macapp(self): """ To compile an executable into a Mac application (a .app), set its *mac_app* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_app = True) To force *all* executables to be transformed into Mac applications:: def 
build(bld): bld.env.MACAPP = True bld.shlib(source='a.c', target='foo') """ if self.env['MACAPP'] or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) self.apptask = self.create_task('macapp', self.link_task.outputs, n1) inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name self.bld.install_files(inst_to, n1, chmod=Utils.O755) if getattr(self, 'mac_resources', None): res_dir = n1.parent.parent.make_node('Resources') inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name for x in self.to_list(self.mac_resources): node = self.path.find_node(x) if not node: raise Errors.WafError('Missing mac_resource %r in %r' % (x, self)) parent = node.parent if os.path.isdir(node.abspath()): nodes = node.ant_glob('**') else: nodes = [node] for node in nodes: rel = node.path_from(parent) tsk = self.create_task('macapp', node, res_dir.make_node(rel)) self.bld.install_as(inst_to + '/%s' % rel, node) if getattr(self.bld, 'is_install', None): # disable the normal binary installation self.install_task.hasrun = Task.SKIP_ME @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macplist(self): """ Create a :py:class:`waflib.Tools.c_osx.macplist` instance. 
""" if self.env['MACAPP'] or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'Info.plist']) self.plisttask = plisttask = self.create_task('macplist', [], n1) if getattr(self, 'mac_plist', False): node = self.path.find_resource(self.mac_plist) if node: plisttask.inputs.append(node) else: plisttask.code = self.mac_plist else: plisttask.code = app_info % self.link_task.outputs[0].name inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name self.bld.install_files(inst_to, n1) @feature('cshlib', 'cxxshlib') @before_method('apply_link', 'propagate_uselib_vars') def apply_bundle(self): """ To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_bundle = True) To force *all* executables to be transformed into bundles:: def build(bld): bld.env.MACBUNDLE = True bld.shlib(source='a.c', target='foo') """ if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False): self.env['LINKFLAGS_cshlib'] = self.env['LINKFLAGS_cxxshlib'] = [] # disable the '-dynamiclib' flag self.env['cshlib_PATTERN'] = self.env['cxxshlib_PATTERN'] = self.env['macbundle_PATTERN'] use = self.use = self.to_list(getattr(self, 'use', [])) if not 'MACBUNDLE' in use: use.append('MACBUNDLE') app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources'] class macapp(Task.Task): """ Create mac applications """ color = 'PINK' def run(self): self.outputs[0].parent.mkdir() shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath()) class macplist(Task.Task): """ Create plist files """ color = 'PINK' ext_in = ['.bin'] def run(self): if getattr(self, 'code', None): txt = self.code else: txt = self.inputs[0].read() self.outputs[0].write(txt)
bsd-3-clause
sagangwee/sagangwee.github.io
build/pygments/build/lib.linux-i686-2.7/pygments/formatters/rtf.py
73
5049
# -*- coding: utf-8 -*- """ pygments.formatters.rtf ~~~~~~~~~~~~~~~~~~~~~~~ A formatter that generates RTF files. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.formatter import Formatter from pygments.util import get_int_opt, _surrogatepair __all__ = ['RtfFormatter'] class RtfFormatter(Formatter): """ Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents. Please note that ``encoding`` and ``outencoding`` options are ignored. The RTF format is ASCII natively, but handles unicode characters correctly thanks to escape sequences. .. versionadded:: 0.6 Additional options accepted: `style` The style to use, can be a string or a Style subclass (default: ``'default'``). `fontface` The used font famliy, for example ``Bitstream Vera Sans``. Defaults to some generic font which is supposed to have fixed width. `fontsize` Size of the font used. Size is specified in half points. The default is 24 half-points, giving a size 12 font. .. versionadded:: 2.0 """ name = 'RTF' aliases = ['rtf'] filenames = ['*.rtf'] def __init__(self, **options): r""" Additional options accepted: ``fontface`` Name of the font used. Could for example be ``'Courier New'`` to further specify the default which is ``'\fmodern'``. The RTF specification claims that ``\fmodern`` are "Fixed-pitch serif and sans serif fonts". Hope every RTF implementation thinks the same about modern... 
""" Formatter.__init__(self, **options) self.fontface = options.get('fontface') or '' self.fontsize = get_int_opt(options, 'fontsize', 0) def _escape(self, text): return text.replace(u'\\', u'\\\\') \ .replace(u'{', u'\\{') \ .replace(u'}', u'\\}') def _escape_text(self, text): # empty strings, should give a small performance improvment if not text: return u'' # escape text text = self._escape(text) buf = [] for c in text: cn = ord(c) if cn < (2**7): # ASCII character buf.append(str(c)) elif (2**7) <= cn < (2**16): # single unicode escape sequence buf.append(u'{\\u%d}' % cn) elif (2**16) <= cn: # RTF limits unicode to 16 bits. # Force surrogate pairs buf.append(u'{\\u%d}{\\u%d}' % _surrogatepair(cn)) return u''.join(buf).replace(u'\n', u'\\par\n') def format_unencoded(self, tokensource, outfile): # rtf 1.8 header outfile.write(u'{\\rtf1\\ansi\\uc0\\deff0' u'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}' u'{\\colortbl;' % (self.fontface and u' ' + self._escape(self.fontface) or u'')) # convert colors and save them in a mapping to access them later. 
color_mapping = {} offset = 1 for _, style in self.style: for color in style['color'], style['bgcolor'], style['border']: if color and color not in color_mapping: color_mapping[color] = offset outfile.write(u'\\red%d\\green%d\\blue%d;' % ( int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16) )) offset += 1 outfile.write(u'}\\f0 ') if self.fontsize: outfile.write(u'\\fs%d' % (self.fontsize)) # highlight stream for ttype, value in tokensource: while not self.style.styles_token(ttype) and ttype.parent: ttype = ttype.parent style = self.style.style_for_token(ttype) buf = [] if style['bgcolor']: buf.append(u'\\cb%d' % color_mapping[style['bgcolor']]) if style['color']: buf.append(u'\\cf%d' % color_mapping[style['color']]) if style['bold']: buf.append(u'\\b') if style['italic']: buf.append(u'\\i') if style['underline']: buf.append(u'\\ul') if style['border']: buf.append(u'\\chbrdr\\chcfpat%d' % color_mapping[style['border']]) start = u''.join(buf) if start: outfile.write(u'{%s ' % start) outfile.write(self._escape_text(value)) if start: outfile.write(u'}') outfile.write(u'}')
mit
songmonit/CTTMSONLINE
addons/account/account_financial_report.py
339
7636
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from datetime import datetime from dateutil.relativedelta import relativedelta from operator import itemgetter from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ # --------------------------------------------------------- # Account Financial Report # --------------------------------------------------------- class account_financial_report(osv.osv): _name = "account.financial.report" _description = "Account Report" def _get_level(self, cr, uid, ids, field_name, arg, context=None): '''Returns a dictionary with key=the ID of a record and value = the level of this record in the tree structure.''' res = {} for report in self.browse(cr, uid, ids, context=context): level = 0 if report.parent_id: level = report.parent_id.level + 1 res[report.id] = level return res def _get_children_by_order(self, cr, uid, ids, context=None): '''returns a dictionary with the key= the ID of a record and value = all its children, computed recursively, and sorted by sequence. 
Ready for the printing''' res = [] for id in ids: res.append(id) ids2 = self.search(cr, uid, [('parent_id', '=', id)], order='sequence ASC', context=context) res += self._get_children_by_order(cr, uid, ids2, context=context) return res def _get_balance(self, cr, uid, ids, field_names, args, context=None): '''returns a dictionary with key=the ID of a record and value=the balance amount computed for this record. If the record is of type : 'accounts' : it's the sum of the linked accounts 'account_type' : it's the sum of leaf accoutns with such an account_type 'account_report' : it's the amount of the related report 'sum' : it's the sum of the children of this record (aka a 'view' record)''' account_obj = self.pool.get('account.account') res = {} for report in self.browse(cr, uid, ids, context=context): if report.id in res: continue res[report.id] = dict((fn, 0.0) for fn in field_names) if report.type == 'accounts': # it's the sum of the linked accounts for a in report.account_ids: for field in field_names: res[report.id][field] += getattr(a, field) elif report.type == 'account_type': # it's the sum the leaf accounts with such an account type report_types = [x.id for x in report.account_type_ids] account_ids = account_obj.search(cr, uid, [('user_type','in', report_types), ('type','!=','view')], context=context) for a in account_obj.browse(cr, uid, account_ids, context=context): for field in field_names: res[report.id][field] += getattr(a, field) elif report.type == 'account_report' and report.account_report_id: # it's the amount of the linked report res2 = self._get_balance(cr, uid, [report.account_report_id.id], field_names, False, context=context) for key, value in res2.items(): for field in field_names: res[report.id][field] += value[field] elif report.type == 'sum': # it's the sum of the children of this account.report res2 = self._get_balance(cr, uid, [rec.id for rec in report.children_ids], field_names, False, context=context) for key, value in res2.items(): for 
field in field_names: res[report.id][field] += value[field] return res _columns = { 'name': fields.char('Report Name', required=True, translate=True), 'parent_id': fields.many2one('account.financial.report', 'Parent'), 'children_ids': fields.one2many('account.financial.report', 'parent_id', 'Account Report'), 'sequence': fields.integer('Sequence'), 'balance': fields.function(_get_balance, 'Balance', multi='balance'), 'debit': fields.function(_get_balance, 'Debit', multi='balance'), 'credit': fields.function(_get_balance, 'Credit', multi="balance"), 'level': fields.function(_get_level, string='Level', store=True, type='integer'), 'type': fields.selection([ ('sum','View'), ('accounts','Accounts'), ('account_type','Account Type'), ('account_report','Report Value'), ],'Type'), 'account_ids': fields.many2many('account.account', 'account_account_financial_report', 'report_line_id', 'account_id', 'Accounts'), 'account_report_id': fields.many2one('account.financial.report', 'Report Value'), 'account_type_ids': fields.many2many('account.account.type', 'account_account_financial_report_type', 'report_id', 'account_type_id', 'Account Types'), 'sign': fields.selection([(-1, 'Reverse balance sign'), (1, 'Preserve balance sign')], 'Sign on Reports', required=True, help='For accounts that are typically more debited than credited and that you would like to print as negative amounts in your reports, you should reverse the sign of the balance; e.g.: Expense account. 
The same applies for accounts that are typically more credited than debited and that you would like to print as positive amounts in your reports; e.g.: Income account.'), 'display_detail': fields.selection([ ('no_detail','No detail'), ('detail_flat','Display children flat'), ('detail_with_hierarchy','Display children with hierarchy') ], 'Display details'), 'style_overwrite': fields.selection([ (0, 'Automatic formatting'), (1,'Main Title 1 (bold, underlined)'), (2,'Title 2 (bold)'), (3,'Title 3 (bold, smaller)'), (4,'Normal Text'), (5,'Italic Text (smaller)'), (6,'Smallest Text'), ],'Financial Report Style', help="You can set up here the format you want this record to be displayed. If you leave the automatic formatting, it will be computed based on the financial reports hierarchy (auto-computed field 'level')."), } _defaults = { 'type': 'sum', 'display_detail': 'detail_flat', 'sign': 1, 'style_overwrite': 0, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
MachineLearningControl/OpenMLC-Python
MLC/GUI/Experiment/ArduinoConfigManager/test.py
1
2275
# -*- coding: utf-8 -*- # MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems # Copyright (C) 2015-2017, Thomas Duriez ([email protected]) # Copyright (C) 2015, Adrian Durán ([email protected]) # Copyright (C) 2015-2017, Ezequiel Torres Feyuk ([email protected]) # Copyright (C) 2016-2017, Marco Germano Zbrun ([email protected]) # Copyright (C) 2016-2017, Raúl Lopez Skuba ([email protected]) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. 
If not, see <http://www.gnu.org/licenses/> import sys from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog from ArduinoBoardDialog import ArduinoBoardDialog from MLC.GUI.Autogenerated.autogenerated import Ui_BoardConfigurationWindow from MLC.GUI.Experiment.ArduinoConfigManager.BoardConfigurationWindow import BoardConfigurationWindow from MLC.arduino.protocol import ProtocolConfig from MLC.arduino.connection.serialconnection import SerialConnectionConfig from ArduinoBoardManager import ArduinoBoardManager def showPinout(): dialog = ArduinoBoardDialog("images/uno.jpg") dialog.exec_() # window = QDialog() # ui = Ui_BoardPinout() # ui.setupUi(window) # window.exec_() if __name__ == '__main__': app = QApplication(sys.argv) # window = BoardConfigurationWindow() protocol_cfg = ProtocolConfig(None) connection_cfg = SerialConnectionConfig('/dev/ttyACM0') manager = ArduinoBoardManager(protocol_cfg, connection_cfg) # ui = Ui_BoardConfigurationFrame() # ui.setupUi(window) # ui.showPinout.clicked.connect(showPinout) manager.start() app.exec_() manager = ArduinoBoardManager(protocol_cfg, connection_cfg) manager.start() app.exec_()
gpl-3.0
noseka1/ansible-modules-core
system/user.py
16
72414
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Stephen Fromm <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: user author: "Stephen Fromm (@sfromm)" version_added: "0.2" short_description: Manage user accounts requirements: [ useradd, userdel, usermod ] description: - Manage user accounts and user attributes. options: name: required: true aliases: [ "user" ] description: - Name of the user to create, remove or modify. comment: required: false description: - Optionally sets the description (aka I(GECOS)) of user account. uid: required: false description: - Optionally sets the I(UID) of the user. non_unique: required: false default: "no" choices: [ "yes", "no" ] description: - Optionally when used with the -u option, this option allows to change the user ID to a non-unique value. version_added: "1.1" group: required: false description: - Optionally sets the user's primary group (takes a group name). groups: required: false description: - Puts the user in this comma-delimited list of groups. When set to the empty string ('groups='), the user is removed from all groups except the primary group. append: required: false default: "no" choices: [ "yes", "no" ] description: - If C(yes), will only add groups, not set them to just the list in I(groups). shell: required: false description: - Optionally set the user's shell. 
home: required: false description: - Optionally set the user's home directory. skeleton: required: false description: - Optionally set a home skeleton directory. Requires createhome option! password: required: false description: - Optionally set the user's password to this crypted value. See the user example in the github examples directory for what this looks like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module) for details on various ways to generate these password values. Note on Darwin system, this value has to be cleartext. Beware of security issues. state: required: false default: "present" choices: [ present, absent ] description: - Whether the account should exist or not, taking action if the state is different from what is stated. createhome: required: false default: "yes" choices: [ "yes", "no" ] description: - Unless set to C(no), a home directory will be made for the user when the account is created or if the home directory does not exist. move_home: required: false default: "no" choices: [ "yes", "no" ] description: - If set to C(yes) when used with C(home=), attempt to move the user's home directory to the specified directory if it isn't there already. system: required: false default: "no" choices: [ "yes", "no" ] description: - When creating an account, setting this to C(yes) makes the user a system account. This setting cannot be changed on existing users. force: required: false default: "no" choices: [ "yes", "no" ] description: - When used with C(state=absent), behavior is as with C(userdel --force). login_class: required: false description: - Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems. remove: required: false default: "no" choices: [ "yes", "no" ] description: - When used with C(state=absent), behavior is as with C(userdel --remove). 
generate_ssh_key: required: false default: "no" choices: [ "yes", "no" ] version_added: "0.9" description: - Whether to generate a SSH key for the user in question. This will B(not) overwrite an existing SSH key. ssh_key_bits: required: false default: 2048 version_added: "0.9" description: - Optionally specify number of bits in SSH key to create. ssh_key_type: required: false default: rsa version_added: "0.9" description: - Optionally specify the type of SSH key to generate. Available SSH key types will depend on implementation present on target host. ssh_key_file: required: false default: .ssh/id_rsa version_added: "0.9" description: - Optionally specify the SSH key filename. If this is a relative filename then it will be relative to the user's home directory. ssh_key_comment: required: false default: ansible-generated on $HOSTNAME version_added: "0.9" description: - Optionally define the comment for the SSH key. ssh_key_passphrase: required: false version_added: "0.9" description: - Set a passphrase for the SSH key. If no passphrase is provided, the SSH key will default to having no passphrase. update_password: required: false default: always choices: ['always', 'on_create'] version_added: "1.3" description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. expires: version_added: "1.9" required: false default: "None" description: - An expiry time for the user in epoch, it will be ignored on platforms that do not support this. Currently supported on Linux and FreeBSD. 
''' EXAMPLES = ''' # Add the user 'johnd' with a specific uid and a primary group of 'admin' - user: name=johnd comment="John Doe" uid=1040 group=admin # Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups - user: name=james shell=/bin/bash groups=admins,developers append=yes # Remove the user 'johnd' - user: name=johnd state=absent remove=yes # Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa - user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa # added a consultant whose account you want to expire - user: name=james18 shell=/bin/zsh groups=developers expires=1422403387 ''' import os import pwd import grp import platform import socket import time try: import spwd HAVE_SPWD=True except: HAVE_SPWD=False class User(object): """ This is a generic User manipulation class that is subclassed based on platform. A subclass may wish to override the following action methods:- - create_user() - remove_user() - modify_user() - ssh_key_gen() - ssh_key_fingerprint() - user_exists() All subclasses MUST define platform and distribution (which may be None). 
""" platform = 'Generic' distribution = None SHADOWFILE = '/etc/shadow' DATE_FORMAT = '%Y-%m-%d' def __new__(cls, *args, **kwargs): return load_platform_subclass(User, args, kwargs) def __init__(self, module): self.module = module self.state = module.params['state'] self.name = module.params['name'] self.uid = module.params['uid'] self.non_unique = module.params['non_unique'] self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] self.shell = module.params['shell'] self.password = module.params['password'] self.force = module.params['force'] self.remove = module.params['remove'] self.createhome = module.params['createhome'] self.move_home = module.params['move_home'] self.skeleton = module.params['skeleton'] self.system = module.params['system'] self.login_class = module.params['login_class'] self.append = module.params['append'] self.sshkeygen = module.params['generate_ssh_key'] self.ssh_bits = module.params['ssh_key_bits'] self.ssh_type = module.params['ssh_key_type'] self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] self.home = None self.expires = None if module.params['home'] is not None: self.home = os.path.expanduser(module.params['home']) if module.params['expires']: try: self.expires = time.gmtime(module.params['expires']) except Exception,e: module.fail_json("Invalid expires time %s: %s" %(self.expires, str(e))) if module.params['ssh_key_file'] is not None: self.ssh_file = module.params['ssh_key_file'] else: self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type) def execute_command(self, cmd, use_unsafe_shell=False, data=None): return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] if self.force: cmd.append('-f') if self.remove: cmd.append('-r') cmd.append(self.name) 
return self.execute_command(cmd) def create_user_useradd(self, command_name='useradd'): cmd = [self.module.get_bin_path(command_name, True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) elif self.group_exists(self.name): # use the -N option (no user group) if a group already # exists with the same name as the user to prevent # errors from useradd trying to create a group when # USERGROUPS_ENAB is set in /etc/login.defs. if os.path.exists('/etc/redhat-release'): dist = platform.dist() major_release = int(dist[1].split('.')[0]) if major_release <= 5: cmd.append('-n') else: cmd.append('-N') else: cmd.append('-N') if self.groups is not None and len(self.groups): groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.expires: cmd.append('--expiredate') cmd.append(time.strftime(self.DATE_FORMAT, self.expires)) if self.password is not None: cmd.append('-p') cmd.append(self.password) if self.createhome: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) else: cmd.append('-M') if self.system: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def _check_usermod_append(self): # check if this version of usermod can append groups usermod_path = self.module.get_bin_path('usermod', True) # for some reason, usermod --help cannot be used by non root # on RH/Fedora, due to lack of execute bit for others if not os.access(usermod_path, os.X_OK): return False cmd = [usermod_path] cmd.append('--help') rc, data1, data2 = self.execute_command(cmd) helpout = data1 + data2 # check 
if --append exists lines = helpout.split('\n') for line in lines: if line.strip().startswith('-a, --append'): return True return False def modify_user_usermod(self): cmd = [self.module.get_bin_path('usermod', True)] info = self.user_info() has_append = self._check_usermod_append() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set(remove_existing=False) group_diff = set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: if has_append: cmd.append('-a') groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: if self.append and not has_append: cmd.append('-A') cmd.append(','.join(group_diff)) else: cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: cmd.append('-d') cmd.append(self.home) if self.move_home: cmd.append('-m') if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.expires: cmd.append('--expiredate') cmd.append(time.strftime(self.DATE_FORMAT, self.expires)) if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) # skip if no changes to be made if len(cmd) == 1: return (None, '', '') elif self.module.check_mode: return (0, '', '') cmd.append(self.name) return 
self.execute_command(cmd) def group_exists(self,group): try: # Try group as a gid first grp.getgrgid(int(group)) return True except (ValueError, KeyError): try: grp.getgrnam(group) return True except KeyError: return False def group_info(self, group): if not self.group_exists(group): return False try: # Try group as a gid first return list(grp.getgrgid(int(group))) except (ValueError, KeyError): return list(grp.getgrnam(group)) def get_groups_set(self, remove_existing=True): if self.groups is None: return None info = self.user_info() groups = set(filter(None, self.groups.split(','))) for g in set(groups): if not self.group_exists(g): self.module.fail_json(msg="Group %s does not exist" % (g)) if info and remove_existing and self.group_info(g)[2] == info[3]: groups.remove(g) return groups def user_group_membership(self): groups = [] info = self.get_pwd_info() for group in grp.getgrall(): if self.name in group.gr_mem and not info[3] == group.gr_gid: groups.append(group[0]) return groups def user_exists(self): try: if pwd.getpwnam(self.name): return True except KeyError: return False def get_pwd_info(self): if not self.user_exists(): return False return list(pwd.getpwnam(self.name)) def user_info(self): if not self.user_exists(): return False info = self.get_pwd_info() if len(info[1]) == 1 or len(info[1]) == 0: info[1] = self.user_password() return info def user_password(self): passwd = '' if HAVE_SPWD: try: passwd = spwd.getspnam(self.name)[1] except KeyError: return passwd if not self.user_exists(): return passwd elif self.SHADOWFILE: # Read shadow file for user's encrypted password string if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): for line in open(self.SHADOWFILE).readlines(): if line.startswith('%s:' % self.name): passwd = line.split(':')[1] return passwd def get_ssh_key_path(self): info = self.user_info() if os.path.isabs(self.ssh_file): ssh_key_file = self.ssh_file else: ssh_key_file = os.path.join(info[5], self.ssh_file) return 
ssh_key_file def ssh_key_gen(self): info = self.user_info() if not os.path.exists(info[5]) and not self.module.check_mode: return (1, '', 'User %s home directory does not exist' % self.name) ssh_key_file = self.get_ssh_key_path() ssh_dir = os.path.dirname(ssh_key_file) if not os.path.exists(ssh_dir): if self.module.check_mode: return (0, '', '') try: os.mkdir(ssh_dir, 0700) os.chown(ssh_dir, info[2], info[3]) except OSError, e: return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) if os.path.exists(ssh_key_file): return (None, 'Key already exists', '') if self.module.check_mode: return (0, '', '') cmd = [self.module.get_bin_path('ssh-keygen', True)] cmd.append('-t') cmd.append(self.ssh_type) cmd.append('-b') cmd.append(self.ssh_bits) cmd.append('-C') cmd.append(self.ssh_comment) cmd.append('-f') cmd.append(ssh_key_file) cmd.append('-N') if self.ssh_passphrase is not None: cmd.append(self.ssh_passphrase) else: cmd.append('') (rc, out, err) = self.execute_command(cmd) if rc == 0: # If the keys were successfully created, we should be able # to tweak ownership. 
os.chown(ssh_key_file, info[2], info[3]) os.chown('%s.pub' % ssh_key_file, info[2], info[3]) return (rc, out, err) def ssh_key_fingerprint(self): ssh_key_file = self.get_ssh_key_path() if not os.path.exists(ssh_key_file): return (1, 'SSH Key file %s does not exist' % ssh_key_file, '') cmd = [ self.module.get_bin_path('ssh-keygen', True) ] cmd.append('-l') cmd.append('-f') cmd.append(ssh_key_file) return self.execute_command(cmd) def get_ssh_public_key(self): ssh_public_key_file = '%s.pub' % self.get_ssh_key_path() try: f = open(ssh_public_key_file) ssh_public_key = f.read().strip() f.close() except IOError: return None return ssh_public_key def create_user(self): # by default we use the create_user_useradd method return self.create_user_useradd() def remove_user(self): # by default we use the remove_user_userdel method return self.remove_user_userdel() def modify_user(self): # by default we use the modify_user_usermod method return self.modify_user_usermod() def create_homedir(self, path): if not os.path.exists(path): if self.skeleton is not None: skeleton = self.skeleton else: skeleton = '/etc/skel' if os.path.exists(skeleton): try: shutil.copytree(skeleton, path, symlinks=True) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) else: try: os.makedirs(path) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) def chown_homedir(self, uid, gid, path): try: os.chown(path, uid, gid) for root, dirs, files in os.walk(path): for d in dirs: os.chown(path, uid, gid) for f in files: os.chown(os.path.join(root, f), uid, gid) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) # =========================================== class FreeBsdUser(User): """ This is a FreeBSD User manipulation class - it uses the pw command to manipulate the user database, followed by the chpass command to change the password. 
This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'FreeBSD' distribution = None SHADOWFILE = '/etc/master.passwd' def remove_user(self): cmd = [ self.module.get_bin_path('pw', True), 'userdel', '-n', self.name ] if self.remove: cmd.append('-r') return self.execute_command(cmd) def create_user(self): cmd = [ self.module.get_bin_path('pw', True), 'useradd', '-n', self.name, ] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None: groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.createhome: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: cmd.append('-L') cmd.append(self.login_class) if self.expires: days =( time.mktime(self.expires) - time.time() ) / 86400 cmd.append('-e') cmd.append(str(int(days))) # system cannot be handled currently - should we error if its requested? 
# create the user (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) # we have to set the password in a second command if self.password is not None: cmd = [ self.module.get_bin_path('chpass', True), '-p', self.password, self.name ] return self.execute_command(cmd) return (rc, out, err) def modify_user(self): cmd = [ self.module.get_bin_path('pw', True), 'usermod', '-n', self.name ] cmd_len = len(cmd) info = self.user_info() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: # find current login class user_login_class = None if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): for line in open(self.SHADOWFILE).readlines(): if line.startswith('%s:' % self.name): user_login_class = line.split(':')[4] # act only if login_class change if self.login_class != user_login_class: cmd.append('-L') cmd.append(self.login_class) if self.groups is not None: current_groups = self.user_group_membership() groups = self.get_groups_set() group_diff = set(current_groups).symmetric_difference(groups) groups_need_mod = False if group_diff: if self.append: for g in groups: if g in group_diff: groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: cmd.append('-G') new_groups = groups if self.append: new_groups = groups | 
set(current_groups) cmd.append(','.join(new_groups)) if self.expires: days = ( time.mktime(self.expires) - time.time() ) / 86400 cmd.append('-e') cmd.append(str(int(days))) # modify the user if cmd will do anything if cmd_len != len(cmd): (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) else: (rc, out, err) = (None, '', '') # we have to set the password in a second command if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [ self.module.get_bin_path('chpass', True), '-p', self.password, self.name ] return self.execute_command(cmd) return (rc, out, err) # =========================================== class OpenBSDUser(User): """ This is a OpenBSD User manipulation class. Main differences are that OpenBSD:- - has no concept of "system" account. - has no force delete user This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'OpenBSD' distribution = None SHADOWFILE = '/etc/master.passwd' def create_user(self): cmd = [self.module.get_bin_path('useradd', True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None: groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: cmd.append('-L') cmd.append(self.login_class) if self.password is not None and self.password != '*': cmd.append('-p') cmd.append(self.password) if self.createhome: cmd.append('-m') if self.skeleton is not None: 
cmd.append('-k') cmd.append(self.skeleton) cmd.append(self.name) return self.execute_command(cmd) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def modify_user(self): cmd = [self.module.get_bin_path('usermod', True)] info = self.user_info() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups_option = '-G' groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set() group_diff = set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: groups_option = '-S' groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: cmd.append(groups_option) cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: # find current login class user_login_class = None userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name] (rc, out, err) = self.execute_command(userinfo_cmd) for line in out.splitlines(): tokens = line.split() if tokens[0] == 'class' and len(tokens) == 2: user_login_class = tokens[1] # act only if login_class change if self.login_class != user_login_class: 
cmd.append('-L') cmd.append(self.login_class) if self.update_password == 'always' and self.password is not None \ and self.password != '*' and info[1] != self.password: cmd.append('-p') cmd.append(self.password) # skip if no changes to be made if len(cmd) == 1: return (None, '', '') elif self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class NetBSDUser(User): """ This is a NetBSD User manipulation class. Main differences are that NetBSD:- - has no concept of "system" account. - has no force delete user This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'NetBSD' distribution = None SHADOWFILE = '/etc/master.passwd' def create_user(self): cmd = [self.module.get_bin_path('useradd', True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None: groups = self.get_groups_set() if len(groups) > 16: self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." 
% len(groups)) cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: cmd.append('-L') cmd.append(self.login_class) if self.password is not None: cmd.append('-p') cmd.append(self.password) if self.createhome: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) cmd.append(self.name) return self.execute_command(cmd) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def modify_user(self): cmd = [self.module.get_bin_path('usermod', True)] info = self.user_info() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set() group_diff = set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: groups = set(current_groups).union(groups) groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: if len(groups) > 16: self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." 
% len(groups)) cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: cmd.append('-L') cmd.append(self.login_class) if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) # skip if no changes to be made if len(cmd) == 1: return (None, '', '') elif self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class SunOS(User): """ This is a SunOS User manipulation class - The main difference between this class and the generic user class is that Solaris-type distros don't support the concept of a "system" account and we need to edit the /etc/shadow file manually to set a password. 
(Ugh) This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'SunOS' distribution = None SHADOWFILE = '/etc/shadow' def remove_user(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def create_user(self): cmd = [self.module.get_bin_path('useradd', True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None: groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.createhome: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) cmd.append(self.name) if self.module.check_mode: return (0, '', '') else: (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) # we have to set the password by editing the /etc/shadow file if self.password is not None: try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): fields = line.strip().split(':') if not fields[0] == self.name: lines.append(line) continue fields[1] = self.password fields[2] = str(int(time.time() / 86400)) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) except Exception, err: self.module.fail_json(msg="failed to update users password: %s" % str(err)) return (rc, out, err) def modify_user_usermod(self): cmd = [self.module.get_bin_path('usermod', True)] cmd_len = len(cmd) info = self.user_info() if self.uid is not None 
and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups = self.get_groups_set() group_diff = set(current_groups).symmetric_difference(groups) groups_need_mod = False if group_diff: if self.append: for g in groups: if g in group_diff: groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: cmd.append('-G') new_groups = groups if self.append: new_groups.update(current_groups) cmd.append(','.join(new_groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) # modify the user if cmd will do anything if cmd_len != len(cmd): (rc, out, err) = (0, '', '') if not self.module.check_mode: cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) else: (rc, out, err) = (None, '', '') # we have to set the password by editing the /etc/shadow file if self.update_password == 'always' and self.password is not None and info[1] != self.password: (rc, out, err) = (0, '', '') if not self.module.check_mode: try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): fields = line.strip().split(':') if not fields[0] == self.name: lines.append(line) continue fields[1] = self.password fields[2] = str(int(time.time() / 86400)) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) rc = 0 except Exception, err: 
self.module.fail_json(msg="failed to update users password: %s" % str(err)) return (rc, out, err) # =========================================== class DarwinUser(User): """ This is a Darwin Mac OS X User manipulation class. Main differences are that Darwin:- - Handles accounts in a database managed by dscl(1) - Has no useradd/groupadd - Does not create home directories - User password must be cleartext - UID must be given - System users must ben under 500 This overrides the following methods from the generic class:- - user_exists() - create_user() - remove_user() - modify_user() """ platform = 'Darwin' distribution = None SHADOWFILE = None dscl_directory = '.' fields = [ ('comment', 'RealName'), ('home', 'NFSHomeDirectory'), ('shell', 'UserShell'), ('uid', 'UniqueID'), ('group', 'PrimaryGroupID'), ] def _get_dscl(self): return [ self.module.get_bin_path('dscl', True), self.dscl_directory ] def _list_user_groups(self): cmd = self._get_dscl() cmd += [ '-search', '/Groups', 'GroupMembership', self.name ] (rc, out, err) = self.execute_command(cmd) groups = [] for line in out.splitlines(): if line.startswith(' ') or line.startswith(')'): continue groups.append(line.split()[0]) return groups def _get_user_property(self, property): '''Return user PROPERTY as given my dscl(1) read or None if not found.''' cmd = self._get_dscl() cmd += [ '-read', '/Users/%s' % self.name, property ] (rc, out, err) = self.execute_command(cmd) if rc != 0: return None # from dscl(1) # if property contains embedded spaces, the list will instead be # displayed one entry per line, starting on the line after the key. 
lines = out.splitlines() #sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines)) if len(lines) == 1: return lines[0].split(': ')[1] else: if len(lines) > 2: return '\n'.join([ lines[1].strip() ] + lines[2:]) else: if len(lines) == 2: return lines[1].strip() else: return None def _get_next_uid(self): '''Return the next available uid''' cmd = self._get_dscl() cmd += ['-list', '/Users', 'UniqueID'] (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg="Unable to get the next available uid", rc=rc, out=out, err=err ) max_uid = 0 for line in out.splitlines(): if max_uid < int(line.split()[1]): max_uid = int(line.split()[1]) return max_uid + 1 def _change_user_password(self): '''Change password for SELF.NAME against SELF.PASSWORD. Please note that password must be cleatext. ''' # some documentation on how is stored passwords on OSX: # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/ # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/ # http://pastebin.com/RYqxi7Ca # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2 # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html # https://gist.github.com/nueh/8252572 cmd = self._get_dscl() if self.password: cmd += [ '-passwd', '/Users/%s' % self.name, self.password] else: cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*'] (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc) return (rc, out, err) def _make_group_numerical(self): '''Convert SELF.GROUP to is stringed numerical value suitable for dscl.''' if self.group is None: self.group = 'nogroup' try: self.group = grp.getgrnam(self.group).gr_gid except KeyError: self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' 
% self.group) # We need to pass a string to dscl self.group = str(self.group) def __modify_group(self, group, action): '''Add or remove SELF.NAME to or from GROUP depending on ACTION. ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. ''' if action == 'add': option = '-a' else: option = '-d' cmd = [ 'dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group ] (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json(msg='Cannot %s user "%s" to group "%s".' % (action, self.name, group), err=err, out=out, rc=rc) return (rc, out, err) def _modify_group(self): '''Add or remove SELF.NAME to or from GROUP depending on ACTION. ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. ''' rc = 0 out = '' err = '' changed = False current = set(self._list_user_groups()) if self.groups is not None: target = set(self.groups.split(',')) else: target = set([]) for remove in current - target: (_rc, _err, _out) = self.__modify_group(remove, 'delete') rc += rc out += _out err += _err changed = True for add in target - current: (_rc, _err, _out) = self.__modify_group(add, 'add') rc += _rc out += _out err += _err changed = True return (rc, err, out, changed) def _update_system_user(self): '''Hide or show user on login window according SELF.SYSTEM. 
Returns 0 if a change has been made, None otherwhise.''' plist_file = '/Library/Preferences/com.apple.loginwindow.plist' # http://support.apple.com/kb/HT5017?viewlocale=en_US cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ] (rc, out, err) = self.execute_command(cmd) # returned value is # ( # "_userA", # "_UserB", # userc # ) hidden_users = [] for x in out.splitlines()[1:-1]: try: x = x.split('"')[1] except IndexError: x = x.strip() hidden_users.append(x) if self.system: if not self.name in hidden_users: cmd = [ 'defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name ] (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc) return 0 else: if self.name in hidden_users: del(hidden_users[hidden_users.index(self.name)]) cmd = [ 'defaults', 'write', plist_file, 'HiddenUsersList', '-array' ] + hidden_users (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc) return 0 def user_exists(self): '''Check is SELF.NAME is a known user on the system.''' cmd = self._get_dscl() cmd += [ '-list', '/Users/%s' % self.name] (rc, out, err) = self.execute_command(cmd) return rc == 0 def remove_user(self): '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.''' info = self.user_info() cmd = self._get_dscl() cmd += [ '-delete', '/Users/%s' % self.name] (rc, out, err) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot delete user "%s".' 
% self.name, err=err, out=out, rc=rc) if self.force: if os.path.exists(info[5]): shutil.rmtree(info[5]) out += "Removed %s" % info[5] return (rc, out, err) def create_user(self, command_name='dscl'): cmd = self._get_dscl() cmd += [ '-create', '/Users/%s' % self.name] (rc, err, out) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc) self._make_group_numerical() if self.uid is None: self.uid = str(self._get_next_uid()) # Homedir is not created by default if self.createhome: if self.home is None: self.home = '/Users/%s' % self.name if not os.path.exists(self.home): os.makedirs(self.home) self.chown_homedir(int(self.uid), int(self.group), self.home) for field in self.fields: if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: cmd = self._get_dscl() cmd += [ '-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]] (rc, _err, _out) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc) out += _out err += _err if rc != 0: return (rc, _err, _out) (rc, _err, _out) = self._change_user_password() out += _out err += _err self._update_system_user() # here we don't care about change status since it is a creation, # thus changed is always true. (rc, _out, _err, changed) = self._modify_group() out += _out err += _err return (rc, err, out) def modify_user(self): changed = None out = '' err = '' self._make_group_numerical() for field in self.fields: if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: current = self._get_user_property(field[1]) if current is None or current != self.__dict__[field[0]]: cmd = self._get_dscl() cmd += [ '-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]] (rc, _err, _out) = self.execute_command(cmd) if rc != 0: self.module.fail_json( msg='Cannot update property "%s" for user "%s".' 
% (field[0], self.name), err=err, out=out, rc=rc) changed = rc out += _out err += _err if self.update_password == 'always' and self.password is not None: (rc, _err, _out) = self._change_user_password() out += _out err += _err changed = rc (rc, _out, _err, _changed) = self._modify_group() out += _out err += _err if _changed is True: changed = rc rc = self._update_system_user() if rc == 0: changed = rc return (changed, out, err) # =========================================== class AIX(User): """ This is a AIX User manipulation class. This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'AIX' distribution = None SHADOWFILE = '/etc/security/passwd' def remove_user(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def create_user_useradd(self, command_name='useradd'): cmd = [self.module.get_bin_path(command_name, True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None and len(self.groups): groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.createhome: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) # set password with chpasswd if self.password is not None: cmd = [] cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) return (rc, out, err) def 
modify_user_usermod(self): cmd = [self.module.get_bin_path('usermod', True)] info = self.user_info() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set() group_diff = set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) # skip if no changes to be made if len(cmd) == 1: (rc, out, err) = (None, '', '') elif self.module.check_mode: return (True, '', '') else: cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) # set password with chpasswd if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [] cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) else: (rc2, out2, err2) = (None, '', '') if rc != None: return (rc, out+out2, err+err2) else: return (rc2, out+out2, err+err2) # =========================================== class HPUX(User): """ This is a HP-UX User manipulation class. 
This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'HP-UX' distribution = None SHADOWFILE = '/etc/shadow' def create_user(self): cmd = ['/usr/sam/lbin/useradd.sam'] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None and len(self.groups): groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.password is not None: cmd.append('-p') cmd.append(self.password) if self.createhome: cmd.append('-m') else: cmd.append('-M') if self.system: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def remove_user(self): cmd = ['/usr/sam/lbin/userdel.sam'] if self.force: cmd.append('-F') if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def modify_user(self): cmd = ['/usr/sam/lbin/usermod.sam'] info = self.user_info() has_append = self._check_usermod_append() if self.uid is not None and info[2] != int(self.uid): cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set(remove_existing=False) group_diff = 
set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: if has_append: cmd.append('-a') groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: if self.append and not has_append: cmd.append('-A') cmd.append(','.join(group_diff)) else: cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: cmd.append('-d') cmd.append(self.home) if self.move_home: cmd.append('-m') if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) # skip if no changes to be made if len(cmd) == 1: return (None, '', '') elif self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== def main(): ssh_defaults = { 'bits': '2048', 'type': 'rsa', 'passphrase': None, 'comment': 'ansible-generated on %s' % socket.gethostname() } module = AnsibleModule( argument_spec = dict( state=dict(default='present', choices=['present', 'absent'], type='str'), name=dict(required=True, aliases=['user'], type='str'), uid=dict(default=None, type='str'), non_unique=dict(default='no', type='bool'), group=dict(default=None, type='str'), groups=dict(default=None, type='str'), comment=dict(default=None, type='str'), home=dict(default=None, type='str'), shell=dict(default=None, type='str'), password=dict(default=None, type='str', no_log=True), login_class=dict(default=None, type='str'), # following options are specific to userdel force=dict(default='no', type='bool'), remove=dict(default='no', type='bool'), # following options are specific to useradd createhome=dict(default='yes', type='bool'), skeleton=dict(default=None, type='str'), 
system=dict(default='no', type='bool'), # following options are specific to usermod move_home=dict(default='no', type='bool'), append=dict(default='no', type='bool'), # following are specific to ssh key generation generate_ssh_key=dict(type='bool'), ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'), ssh_key_type=dict(default=ssh_defaults['type'], type='str'), ssh_key_file=dict(default=None, type='str'), ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), ssh_key_passphrase=dict(default=None, type='str', no_log=True), update_password=dict(default='always',choices=['always','on_create'],type='str'), expires=dict(default=None, type='float'), ), supports_check_mode=True ) user = User(module) module.debug('User instantiated - platform %s' % user.platform) if user.distribution: module.debug('User instantiated - distribution %s' % user.distribution) rc = None out = '' err = '' result = {} result['name'] = user.name result['state'] = user.state if user.state == 'absent': if user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.remove_user() if rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) result['force'] = user.force result['remove'] = user.remove elif user.state == 'present': if not user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.create_user() result['system'] = user.system result['createhome'] = user.createhome else: # modify user (note: this function is check mode aware) (rc, out, err) = user.modify_user() result['append'] = user.append result['move_home'] = user.move_home if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if user.password is not None: result['password'] = 'NOT_LOGGING_PASSWORD' if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err if user.user_exists(): info = user.user_info() if info == False: result['msg'] = "failed to 
look up user name: %s" % user.name result['failed'] = True result['uid'] = info[2] result['group'] = info[3] result['comment'] = info[4] result['home'] = info[5] result['shell'] = info[6] result['uid'] = info[2] if user.groups is not None: result['groups'] = user.groups # handle missing homedirs info = user.user_info() if user.home is None: user.home = info[5] if not os.path.exists(user.home) and user.createhome: if not module.check_mode: user.create_homedir(user.home) user.chown_homedir(info[2], info[3], user.home) result['changed'] = True # deal with ssh key if user.sshkeygen: # generate ssh key (note: this function is check mode aware) (rc, out, err) = user.ssh_key_gen() if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if rc == 0: result['changed'] = True (rc, out, err) = user.ssh_key_fingerprint() if rc == 0: result['ssh_fingerprint'] = out.strip() else: result['ssh_fingerprint'] = err.strip() result['ssh_key_file'] = user.get_ssh_key_path() result['ssh_public_key'] = user.get_ssh_public_key() module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
SuYiling/chrome_depot_tools
testing_support/super_mox.py
25
5140
# Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simplify unit tests based on pymox.""" import os import random import shutil import string import StringIO import subprocess import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) from third_party.pymox import mox class IsOneOf(mox.Comparator): def __init__(self, keys): self._keys = keys def equals(self, rhs): return rhs in self._keys def __repr__(self): return '<sequence or map containing \'%s\'>' % str(self._keys) class TestCaseUtils(object): """Base class with some additional functionalities. People will usually want to use SuperMoxTestBase instead.""" # Backup the separator in case it gets mocked _OS_SEP = os.sep _RANDOM_CHOICE = random.choice _RANDOM_RANDINT = random.randint _STRING_LETTERS = string.letters ## Some utilities for generating arbitrary arguments. def String(self, max_length): return ''.join([self._RANDOM_CHOICE(self._STRING_LETTERS) for _ in xrange(self._RANDOM_RANDINT(1, max_length))]) def Strings(self, max_arg_count, max_arg_length): return [self.String(max_arg_length) for _ in xrange(max_arg_count)] def Args(self, max_arg_count=8, max_arg_length=16): return self.Strings(max_arg_count, self._RANDOM_RANDINT(1, max_arg_length)) def _DirElts(self, max_elt_count=4, max_elt_length=8): return self._OS_SEP.join(self.Strings(max_elt_count, max_elt_length)) def Dir(self, max_elt_count=4, max_elt_length=8): return (self._RANDOM_CHOICE((self._OS_SEP, '')) + self._DirElts(max_elt_count, max_elt_length)) def RootDir(self, max_elt_count=4, max_elt_length=8): return self._OS_SEP + self._DirElts(max_elt_count, max_elt_length) def compareMembers(self, obj, members): """If you add a member, be sure to add the relevant test!""" # Skip over members starting with '_' since they are usually not meant to # be for public use. 
actual_members = [x for x in sorted(dir(obj)) if not x.startswith('_')] expected_members = sorted(members) if actual_members != expected_members: diff = ([i for i in actual_members if i not in expected_members] + [i for i in expected_members if i not in actual_members]) print >> sys.stderr, diff # pylint: disable=E1101 self.assertEqual(actual_members, expected_members) def setUp(self): self.root_dir = self.Dir() self.args = self.Args() self.relpath = self.String(200) def tearDown(self): pass class StdoutCheck(object): def setUp(self): # Override the mock with a StringIO, it's much less painful to test. self._old_stdout = sys.stdout stdout = StringIO.StringIO() stdout.flush = lambda: None sys.stdout = stdout def tearDown(self): try: # If sys.stdout was used, self.checkstdout() must be called. # pylint: disable=E1101 if not sys.stdout.closed: self.assertEquals('', sys.stdout.getvalue()) except AttributeError: pass sys.stdout = self._old_stdout def checkstdout(self, expected): value = sys.stdout.getvalue() sys.stdout.close() # pylint: disable=E1101 self.assertEquals(expected, value) class SuperMoxTestBase(TestCaseUtils, StdoutCheck, mox.MoxTestBase): def setUp(self): """Patch a few functions with know side-effects.""" TestCaseUtils.setUp(self) mox.MoxTestBase.setUp(self) os_to_mock = ('chdir', 'chown', 'close', 'closerange', 'dup', 'dup2', 'fchdir', 'fchmod', 'fchown', 'fdopen', 'getcwd', 'lseek', 'makedirs', 'mkdir', 'open', 'popen', 'popen2', 'popen3', 'popen4', 'read', 'remove', 'removedirs', 'rename', 'renames', 'rmdir', 'symlink', 'system', 'tmpfile', 'walk', 'write') self.MockList(os, os_to_mock) os_path_to_mock = ('abspath', 'exists', 'getsize', 'isdir', 'isfile', 'islink', 'ismount', 'lexists', 'realpath', 'samefile', 'walk') self.MockList(os.path, os_path_to_mock) self.MockList(shutil, ('rmtree')) self.MockList(subprocess, ('call', 'Popen')) # Don't mock stderr since it confuses unittests. 
self.MockList(sys, ('stdin')) StdoutCheck.setUp(self) def tearDown(self): StdoutCheck.tearDown(self) TestCaseUtils.tearDown(self) mox.MoxTestBase.tearDown(self) def MockList(self, parent, items_to_mock): for item in items_to_mock: # Skip over items not present because of OS-specific implementation, # implemented only in later python version, etc. if hasattr(parent, item): try: self.mox.StubOutWithMock(parent, item) except TypeError, e: raise TypeError( 'Couldn\'t mock %s in %s: %s' % (item, parent.__name__, e)) def UnMock(self, obj, name): """Restore an object inside a test.""" for (parent, old_child, child_name) in self.mox.stubs.cache: if parent == obj and child_name == name: setattr(parent, child_name, old_child) break
bsd-3-clause
jaesivsm/pyAggr3g470r
src/web/lib/article_cleaner.py
1
2011
from urllib.parse import urlparse, urlunparse, ParseResult from bs4 import BeautifulSoup from bootstrap import is_secure_served HTTPS_IFRAME_DOMAINS = ('vimeo.com', 'youtube.com', 'youtu.be') def clean_urls(article_content, article_link): parsed_article_url = urlparse(article_link) parsed_content = BeautifulSoup(article_content, 'html.parser') for img in parsed_content.find_all('img'): if 'src' not in img.attrs: continue if is_secure_served() and 'srcset' in img.attrs: # removing active content when serving over https del img.attrs['srcset'] to_rebuild, img_src = False, urlparse(img.attrs['src']) if not img_src.scheme or not img_src.netloc: to_rebuild = True # either scheme or netloc are missing from the src of the img scheme = img_src.scheme or parsed_article_url.scheme netloc = img_src.netloc or parsed_article_url.netloc img_src = ParseResult(scheme=scheme, netloc=netloc, path=img_src.path, query=img_src.query, params=img_src.params, fragment=img_src.fragment) if to_rebuild: img.attrs['src'] = urlunparse(img_src) if is_secure_served(): for iframe in parsed_content.find_all('iframe'): if 'src' not in iframe.attrs: continue iframe_src = urlparse(iframe.attrs['src']) if iframe_src.scheme != 'http': continue for domain in HTTPS_IFRAME_DOMAINS: if domain not in iframe_src.netloc: continue iframe_src = ParseResult(scheme='https', netloc=iframe_src.netloc, path=iframe_src.path, query=iframe_src.query, params=iframe_src.params, fragment=iframe_src.fragment) iframe.attrs['src'] = urlunparse(iframe_src) break return str(parsed_content)
agpl-3.0
daniponi/django
django/db/migrations/autodetector.py
5
56018
from __future__ import unicode_literals import datetime import re from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject from django.utils import six from .topological_sort import stable_topological_sort class MigrationAutodetector(object): """ Takes a pair of ProjectStates, and compares them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() self.existing_apps = {app for app, model in from_state.models} def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of applicable changes. Takes a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. 
Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. """ if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return { key: self.deep_deconstruct(value) for key, value in obj.items() } elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, 'deconstruct'): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], { key: self.deep_deconstruct(value) for key, value in kwargs.items() }, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as, of course, the related fields change during renames) """ fields_def = [] for name, field in sorted(fields): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: del deconstruction[2]['to'] fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Returns a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. 
to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. # We'll then go through that list later and order it and split # into migrations to resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_apps = self.from_state.concrete_apps self.new_apps = self.to_state.apps self.old_model_keys = [] self.old_proxy_keys = [] self.old_unmanaged_keys = [] self.new_model_keys = [] self.new_proxy_keys = [] self.new_unmanaged_keys = [] for al, mn in sorted(self.from_state.models.keys()): model = self.old_apps.get_model(al, mn) if not model._meta.managed: self.old_unmanaged_keys.append((al, mn)) elif al not in self.from_state.real_apps: if model._meta.proxy: self.old_proxy_keys.append((al, mn)) else: self.old_model_keys.append((al, mn)) for al, mn in sorted(self.to_state.models.keys()): model = self.new_apps.get_model(al, mn) if not model._meta.managed: self.new_unmanaged_keys.append((al, mn)) elif ( al not in self.from_state.real_apps or (convert_apps and al in convert_apps) ): if model._meta.proxy: self.new_proxy_keys.append((al, mn)) else: self.new_model_keys.append((al, mn)) # Renames have to come first self.generate_renamed_models() # Prepare lists of fields and generate through model map self._prepare_field_lists() self._generate_through_model_map() # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() # Generate field operations self.generate_renamed_fields() self.generate_removed_fields() self.generate_added_fields() 
self.generate_altered_fields() self.generate_altered_unique_together() self.generate_altered_index_together() self.generate_altered_db_table() self.generate_altered_order_with_respect_to() self._sort_migrations() self._build_migration_list(graph) self._optimize_migrations() return self.migrations def _prepare_field_lists(self): """ Prepare field lists, and prepare a list of the fields that used through models in the old state so we can make dependencies from the through model deletion to the field that uses it. """ self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys) self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys) self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys) self.through_users = {} self.old_field_keys = set() self.new_field_keys = set() for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields) self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields) def _generate_through_model_map(self): """ Through model map generation """ for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields: old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name) if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and not old_field.remote_field.through._meta.auto_created): through_key = ( old_field.remote_field.through._meta.app_label, old_field.remote_field.through._meta.model_name, ) 
self.through_users[through_key] = (app_label, old_model_name, field_name) def _build_migration_list(self, graph=None): """ We need to chop the lists of operations up into migrations with dependencies on each other. We do this by stepping up an app's list of operations until we find one that has an outgoing dependency that isn't in another app's migration yet (hasn't been chopped off its list). We then chop off the operations before it into a migration and move onto the next app. If we loop back around without doing anything, there's a circular dependency (which _should_ be impossible as the operations are all split at this point so they can't depend and be depended on). """ self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed. for app_label in sorted(self.generated_operations.keys()): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: is_swappable_dep = False if dep[0] == "__setting__": # We need to temporarily resolve the swappable dependency to prevent # circular references. While keeping the dependency checks on the # resolved model we still add the swappable dependencies. # See #23322 resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.') original_dep = dep dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3]) is_swappable_dep = True if dep[0] != app_label and dep[0] != "__setting__": # External app dependency. See if it's not yet # satisfied. 
for other_operation in self.generated_operations.get(dep[0], []): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add((original_dep[0], original_dep[1])) elif dep[0] in self.migrations: operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name)) else: # If we can't find the other app, we add a first/last dependency, # but only if we've already been through once and checked everything if chop_mode: # If the app already exists, we add a dependency on the last migration, # as we don't know which migration contains the target field. # If it's not yet migrated or has no migrations, we use __first__ if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add(graph.leaf_nodes(dep[0])[0]) else: operation_dependencies.add((dep[0], "__first__")) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) self.generated_operations[app_label] = self.generated_operations[app_label][1:] else: break # Make a migration! 
Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []}) instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label) instance.dependencies = list(dependencies) instance.operations = chopped instance.initial = app_label not in self.existing_apps self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = chopped + self.generated_operations[app_label] new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations) num_ops = new_num_ops def _sort_migrations(self): """ Reorder to make things possible. The order we have already isn't bad, but we need to pull a few things around so FKs work nicely inside the same app """ for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph) def _optimize_migrations(self): # Add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for app_label, migrations in self.migrations.items(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in 
migrations: migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label) def check_dependency(self, operation, dependency): """ Returns ``True`` if the given operation depends on the given dependency, ``False`` otherwise. """ # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance(operation, 
(operations.AlterUniqueTogether, operations.AlterIndexTogether)) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency, )) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Sorting key function that places potential swappable models first in lists of created models (only real way to solve #22783) """ try: model = self.new_apps.get_model(item[0], item[1]) base_names = [base.__name__ for base in model.__bases__] string_version = "%s.%s" % (item[0], item[1]) if ( model._meta.swappable or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Finds any renamed models, and generates the operations for them, and removes the old entry from the model lists. Must be run before other model-level generation. 
""" self.renamed_models = {} self.renamed_models_rel = {} added_models = set(self.new_model_keys) - set(self.old_model_keys) for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = set(self.old_model_keys) - set(self.new_model_keys) for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[rem_app_label, rem_model_name] rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model(rem_model_state, model_state): self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ) ) self.renamed_models[app_label, model_name] = rem_model_name renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name) self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % ( model_state.app_label, model_state.name, ) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.append((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (we'll optimize these back in later if we can). We also defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). 
""" old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys) added_models = set(self.new_model_keys) - old_keys added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True) ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] model_opts = self.new_apps.get_model(app_label, model_name)._meta # Gather related fields related_fields = {} primary_key_rel = None for field in model_opts.local_fields: if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if (getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created): related_fields[field.name] = field for field in model_opts.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Are there unique/index_together to defer? unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) order_with_respect_to = model_state.options.pop('order_with_respect_to', None) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." 
in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append(( primary_key_rel._meta.app_label, primary_key_rel._meta.object_name, None, True )) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[d for d in model_state.fields if d[0] not in related_fields], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_opts.managed: continue # Generate operations for each related field for name, field in sorted(related_fields.items()): dependencies = self._get_dependecies_for_foreign_key(field) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns related_dependencies = [ (app_label, model_name, name, True) for name, field in sorted(related_fields.items()) ] related_dependencies.append((app_label, model_name, None, True)) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=unique_together, ), dependencies=related_dependencies ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies ) if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ] ) def generate_created_proxies(self): """ Makes CreateModel 
statements for proxy models. We use the same statements as that way there's less code duplication, but of course for proxy models we can skip all that pointless field stuff and just chuck out an operation. """ added = set(self.new_proxy_keys) - set(self.old_proxy_keys) for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (we'll optimize these back in later if we can). We also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). 
""" new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys) deleted_models = set(self.old_model_keys) - new_keys deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models)) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] model = self.old_apps.get_model(app_label, model_name) if not model._meta.managed: # Skip here, no need to handle fields for unmanaged models continue # Gather related fields related_fields = {} for field in model._meta.local_fields: if field.remote_field: if field.remote_field.model: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if (getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created): related_fields[field.name] = field for field in model._meta.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Generate option removal first unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ) ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ) ) # Then remove each related field for name, field in sorted(related_fields.items()): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ) ) # Finally, remove the model. 
# This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. dependencies = [] for related_object in model._meta.related_objects: related_object_app_label = related_object.related_model._meta.app_label object_name = related_object.related_model._meta.object_name field_name = related_object.field.name dependencies.append((related_object_app_label, object_name, field_name, False)) if not related_object.many_to_many: dependencies.append((related_object_app_label, object_name, field_name, "alter")) for name, field in sorted(related_fields.items()): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append((through_user[0], through_user[1], through_user[2], False)) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """ Makes DeleteModel statements for proxy models. """ deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys) for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def generate_renamed_fields(self): """ Works out renamed fields """ self.renamed_fields = {} for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Scan to see if this is actually a rename! 
field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys): if rem_app_label == app_label and rem_model_name == model_name: old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name)) if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]: old_rel_to = old_field_dec[2]['to'] if old_rel_to in self.renamed_models_rel: old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to] if old_field_dec == field_dec: if self.questioner.ask_rename(model_name, rem_field_name, field_name, field): self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ) ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[app_label, model_name, field_name] = rem_field_name break def generate_added_fields(self): """ Fields that have been added """ for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Fields that are foreignkeys/m2ms depend on stuff dependencies = [] if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependecies_for_foreign_key(field)) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. 
preserve_default = True if (not field.null and not field.has_default() and not field.many_to_many and not (field.blank and field.empty_strings_allowed)): field = field.clone() field.default = self.questioner.ask_not_null_addition(field_name, model_name) preserve_default = False self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """ Fields that have been removed. """ for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Fields that have been altered. """ for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)): # Did the field change? 
old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name) old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name) new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None): rename_key = ( new_field.remote_field.model._meta.app_label, new_field.remote_field.model._meta.model_name, ) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) if old_field_dec != new_field_dec: both_m2m = old_field.many_to_many and new_field.many_to_many neither_m2m = not old_field.many_to_many and not new_field.many_to_many if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if (old_field.null and not new_field.null and not new_field.has_default() and not new_field.many_to_many): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration(field_name, model_name) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ) ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def _get_dependecies_for_foreign_key(self, field): # Account for FKs to swappable models swappable_setting = getattr(field, 'swappable_setting', None) if swappable_setting is not None: dep_app_label = 
"__setting__" dep_object_name = swappable_setting else: dep_app_label = field.remote_field.model._meta.app_label dep_object_name = field.remote_field.model._meta.object_name dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: dependencies.append(( field.remote_field.through._meta.app_label, field.remote_field.through._meta.object_name, None, True, )) return dependencies def _generate_altered_foo_together(self, operation): option_name = operation.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) or set() if old_value: old_value = { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } new_value = new_model_state.options.get(option_name) or set() if new_value: new_value = set(new_value) if old_value != new_value: dependencies = [] for foo_togethers in new_value: for field_name in foo_togethers: field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependecies_for_foreign_key(field)) self.add_operation( app_label, operation( name=model_name, **{option_name: new_value} ), dependencies=dependencies, ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys) 
for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get('db_table') new_db_table_name = new_model_state.options.get('db_table') if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ) ) def generate_altered_options(self): """ Works out if any non-schema-affecting options have changed and makes an operation to represent them in state changes (in case Python code in migrations needs them) """ models_to_check = self.kept_model_keys.union( self.kept_proxy_keys ).union( self.kept_unmanaged_keys ).union( # unmanaged converted to managed set(self.old_unmanaged_keys).intersection(self.new_model_keys) ).union( # managed converted to unmanaged set(self.old_model_keys).intersection(self.new_unmanaged_keys) ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = dict( option for option in old_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) new_options = dict( option for option in new_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ) ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = 
self.to_state.models[app_label, model_name] if (old_model_state.options.get("order_with_respect_to") != new_model_state.options.get("order_with_respect_to")): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append(( app_label, model_name, new_model_state.options["order_with_respect_to"], True, )) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get('order_with_respect_to'), ), dependencies=dependencies, ) def generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ) ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Takes in a result from changes() and a MigrationGraph, and fixes the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. """ leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. 
for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) if i == 0 and not app_leaf: new_name = "0001_%s" % migration_name if migration_name else "0001_initial" else: new_name = "%04i_%s" % ( next_number, migration_name or self.suggest_name(migration.operations)[:100], ) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for app_label, migrations in changes.items(): for migration in migrations: migration.dependencies = [name_map.get(d, d) for d in migration.dependencies] return changes def _trim_to_apps(self, changes, app_labels): """ Takes changes from arrange_for_graph and set of app labels and returns a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present, as they may be required dependencies. 
""" # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) for app_label in list(required_apps): required_apps.update(app_dependencies.get(app_label, set())) # Remove all migrations that aren't needed for app_label in list(changes.keys()): if app_label not in required_apps: del changes[app_label] return changes @classmethod def suggest_name(cls, ops): """ Given a set of operations, suggests a name for the migration they might represent. Names are not guaranteed to be unique, but we put some effort in to the fallback name to avoid VCS conflicts if we can. """ if len(ops) == 1: if isinstance(ops[0], operations.CreateModel): return ops[0].name_lower elif isinstance(ops[0], operations.DeleteModel): return "delete_%s" % ops[0].name_lower elif isinstance(ops[0], operations.AddField): return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif isinstance(ops[0], operations.RemoveField): return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif len(ops) > 1: if all(isinstance(o, operations.CreateModel) for o in ops): return "_".join(sorted(o.name_lower for o in ops)) return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M") @classmethod def parse_number(cls, name): """ Given a migration name, tries to extract a number from the beginning of it. If no number found, returns None. """ match = re.match(r'^\d+', name) if match: return int(match.group()) return None
bsd-3-clause
gmsn-ita/vaspirin
scripts/plot_compared_bands.py
2
6165
#!/usr/bin/env python3 import sys from vaspirin import outcar,graceIO,datIO import argparse ################################ ## PARSING AND HELLO MESSAGES ## ################################ def positive_int (value): ''' Type for allowing only positive int values for argparser taken from http://stackoverflow.com/questions/14117415/using-argparse-allow-only-positive-integers ''' ivalue = int(value) if ivalue < 0: raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value) return ivalue def parseArgs(): """ Parse arguments from the command line. Uses the `argparse` package to establish all positional and optional arguments. """ helloDescription = ("Painless VASP postprocessing tool\n" + "Generates compared band structures from VASP files\n" + "Written by Daniel S. Koda and Ivan Guilhon\n" + "Group of Semiconductor Materials and Nanotechnology\n" + "Instituto Tecnologico de Aeronautica, Brazil\n" + "http://www.gmsn.ita.br/?q=en") parser = argparse.ArgumentParser(description=helloDescription, epilog= "Last revision: Feb. 
2017.", prog="plot_compared_bands.py") # Vaspirin configurations parser.add_argument('-q', '--quiet', action='store_true', help="do not display text on the output window (default: False)") parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.2') # Band structure options parser.add_argument('input_folders', nargs=2, help="paths to the folders in which the calculations can be found") # Band structure tweaking parser.add_argument('-i', '--ignore', type=positive_int, default=0, help="ignore the first N k-points when plotting bands (default: 0)") parser.add_argument('-t', '--interpolate', type=positive_int, default=0, help="interpolate N k-points between each pair of k-points when plotting bands (default: 0)") parser.add_argument('-c', '--colors', nargs = 2, default=['black','red'], help="colors of the bands to be compared (default: black and red)", metavar=('COLOR_1', 'COLOR_2')) parser.add_argument('-y', '--yaxis', type=float, nargs=2, default=[-3, 3], help="set the y-axis range for the band structure" + " (default: -3 to 3).", metavar=('Y_MIN', 'Y_MAX')) parser.add_argument('-r', '--ref', nargs = 2, default=['e-fermi','e-fermi'], help="reference for the 0 eV in band structures (default: e-fermi for both)", metavar=('REF_1', 'REF_2')) parser.add_argument('-s', '--soc', action='store_true', help="plot bands from non-collinear calculations (default: False)") return parser.parse_args() def printHello (): ''' Print hello message. ''' print ("************************************") print (" vaspirin v2.0: plot_compared_bands ") print ("************************************") def printRunDescription (args): ''' Print description of the options chosen and the crystals input. 
''' leftJustSpace = 20 print ("required files:".ljust(leftJustSpace) + "OUTCAR, KPOINTS") print ("colors:".ljust(leftJustSpace) + "%s and %s" % (args.colors[0],args.colors[1])) print ("references:".ljust(leftJustSpace) + "%s and %s" % (args.ref[0],args.ref[1])) print ("ignoring:".ljust(leftJustSpace) + "%d k-point(s)" % args.ignore) print ("interpolating:".ljust(leftJustSpace) + "%d k-point(s)" % args.interpolate) print ("y axis:".ljust(leftJustSpace) + "from %.1f to %.1f" % (args.yaxis[0],args.yaxis[1])) print ("") ######################## ## PLOTTING FUNCTIONS ## ######################## def printComparisonBands (xmgrace, colors, bands1, bands2): """ Prints a .bfile for a common band structure This method contains all needed settings """ with open ('bandsComparison.bfile', 'w') as outputFile: outputFile.write ("READ NXY \"eigenv1.dat\" \n") xmgrace.printFontSection (outputFile) xmgrace.printTraces (outputFile, bands1, traceColor=colors[0], firstBand=0) outputFile.write ("READ NXY \"eigenv2.dat\" \n") xmgrace.printTraces (outputFile, bands2, traceColor=colors[1], firstBand=bands1.nBands) xmgrace.printAxis (outputFile, bands1) xmgrace.printLabel (outputFile) ################################ ## MAIN FUNCTION FOR VASPIRIN ## ################################ def main(): ''' plot_compared_bands main function By default, informations are extracted from typical VASP files, such as OUTCAR, KPOINTS in each specified path ''' ## Parse arguments from the command line args = parseArgs() ## Print information on the screen only if the user wants to receive it if not args.quiet: printHello () printRunDescription (args) ## Read and configure the paths try: path_1 = args.input_folders[0] path_2 = args.input_folders[1] except IndexError: print ("Two paths must be specified! 
Exiting...") sys.exit(1) if path_1[-1] != '/': path_1 += '/' if path_2[-1] != '/': path_2 += '/' ## Create classes responsible for processing files dat = datIO.DatFiles () dat.setInterpolateOptions (args.interpolate) xmgrace = graceIO.Grace () # Reading the KPOINTS file from path 1 try: xmgrace.readXticks (path_1 + 'KPOINTS') except: print ("Wrong header formatting in KPOINTS file. Plotting without k-points on the x axis...") ## Set the range of the y axis xmgrace.setYaxis (args.yaxis[0], args.yaxis[1]) ## Configuring the band structure data bsData_1 = outcar.BandStructure (fOutcar = path_1 + 'OUTCAR', nKPTignore = args.ignore) bsData_1.setReferenceString(args.ref[0]) bsData_1.setSOC(args.soc) bsData_2 = outcar.BandStructure (fOutcar = path_2 + 'OUTCAR', nKPTignore = args.ignore) bsData_2.setReferenceString(args.ref[1]) bsData_2.setSOC(args.soc) dat.datEigenvals (bsData_1, datName='eigenv1.dat') dat.datEigenvals (bsData_2, datName='eigenv2.dat') printComparisonBands (xmgrace, args.colors, bsData_1, bsData_2) print ("Print the results using XMgrace\n xmgrace -batch bandsComparison.bfile") #~ print ("(add -hardcopy -nosafe to the xmgrace command if you want to print it directly)") if __name__ == "__main__": main ()
gpl-3.0
incaser/project
__unported__/project_service_type/__openerp__.py
21
1886
# -*- coding: utf-8 -*- ############################################################################## # # @author Grand-Guillaume Joel # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # ############################################################################## { "name": "Add type of service on project", "version": "1.0", "author": "Camptocamp,Odoo Community Association (OCA)", "category": "Generic Modules/Projects & Services", "description": """ This will add a type of service on project allowing you to categorize them. """, "website": "http://camptocamp.com", "license": "GPL-2 or any later version", "depends": [ "project", ], "init_xml": [], "demo_xml": [], "update_xml": [ "project_view.xml" ], "active": False, "installable": False }
agpl-3.0