repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
fluggo/Canvas | fluggo/editor/plugins/_codec.py | 1 | 9363 | # This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2012 Brian J. Crowell <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from fluggo import logging
_log = logging.getLogger(__name__)
from ._base import *
from ._source import *
class CodecPlugin(Plugin):
    '''Plugin that contributes encoders and decoders for one or more stream
    formats.

    Encoders and decoders transform between CodecPacketSource and
    AudioStream/VideoStream. Intermediate steps (such as CodedImageSources)
    are not exposed, except where options for reconstructing images appear
    in the codec's settings.'''

    @classmethod
    def get_all_codecs(cls):
        '''Return a list of all codecs supported by this plugin.

        The base implementation supports none.'''
        return []
class Codec(object):
    '''Base class for a single encoder/decoder provided by a CodecPlugin.

    Subclasses describe their capabilities through the class attributes
    below and build encoders/decoders on demand.'''

    # NOTE: the original file placed these descriptions as bare string
    # statements ABOVE each attribute; they were no-ops (and the first one
    # silently became the class docstring), so they are plain comments now.

    # Sets this codec's initial place among other codecs. If a codec thinks
    # it might be particularly good at decoding/encoding a format, it might
    # bump this higher.
    default_priority = 0

    # A reference to the plugin that provided this codec.
    plugin = None

    # Name of this codec.
    name = None

    # A frozenset of format URNs this codec supports.
    format_urns = frozenset()

    # A URN that uniquely identifies this codec.
    urn = None

    # Video/audio/subtitle/etc.
    stream_type = None

    # True if the codec can decode streams.
    can_decode = False

    # True if the codec can encode streams.
    can_encode = False

    @classmethod
    def get_localized_name(cls):
        # BUG FIX: this classmethod's first parameter was named "self"
        '''Return the localized name of this codec, or None if the codec doesn't
        support the current locale.'''
        return None

    def get_definition(self):
        '''Return a dictionary of parameters that can be passed to the codec's
        constructor to re-create the object.'''
        return {}

    def create_encoder(self, stream, offset, length):
        '''Return a CodecPacketSource encoding *stream*; *offset* and *length*
        identify which part of the stream should be encoded.

        The returned object needs to have a get_definition() method, which
        sources can pass to this method to re-create the encoder, and a codec
        attribute which could be used to identify this codec.'''
        raise NotImplementedError

    def create_decoder(self, packet_stream, offset, length):
        '''Return a stream object (VideoStream, AudioStream, etc.) to decode
        the given *packet_stream*; *offset* and *length* indicate where and
        how long the stream is.

        The returned object needs to have a get_definition() method, which
        sources can pass to this method to re-create the decoder, and a codec
        attribute which could be used to identify this codec.'''
        raise NotImplementedError
class NotConnectedError(Exception):
    '''Error used by the codec machinery when no connection is available.'''
class _DecoderConnector(object):
    '''Finds a codec to decode the given stream.

    This class publishes alerts for any error that happens when finding the
    codec.  It is meant to be mixed in with a stream class that provides
    set_base_filter(), set_format(), and the alert methods (show_alert,
    hide_alert, follow_alerts, unfollow_alerts) -- see
    VideoDecoderConnector/AudioDecoderConnector below.'''

    def __init__(self, packet_stream, format_urn, offset, length, model_obj=None, codec_urn=None, definition=None):
        '''Creates a connector for the given *packet_stream*.

        If *codec_urn* is given, the connector tries to find the exact decoder
        and create it with the given *definition*. Otherwise, it tries to find
        a codec that can decode *format_urn* and creates it with no settings.'''
        if not packet_stream:
            raise ValueError('packet_stream cannot be None')

        self._pktstream = packet_stream
        self._offset = offset
        self._length = length
        self._start_definition = definition or {}
        self._format_urn = format_urn
        self._codec_urn = codec_urn
        self.model_obj = model_obj
        self.codec = None
        self.decoder = None
        self._error = None

        self.connect()

        # TODO: Handle codecs appearing (and disappearing?)

    def _clear(self):
        # Put the stream into a disconnected state
        self.set_base_filter(None, new_range=(None, None))
        self.set_format(None)

    def get_definition(self):
        # Before a decoder is found, report the definition we were created
        # with so it isn't lost
        if not self.decoder:
            return self._start_definition

        return self.decoder.get_definition()

    def connect(self):
        '''Find (or re-find) a decoder for the packet stream, updating the
        stream's base filter/format and showing or hiding alerts as needed.'''
        try:
            # Drop any previous decoder and alert before trying again
            if self.decoder:
                self.unfollow_alerts(self.decoder)
                self.decoder = None
                self.codec = None

            if self._error:
                self.hide_alert(self._error)
                self._error = None

            if self._codec_urn:
                # We're out to find a specific codec
                codec_class = PluginManager.get_codec_by_urn(self._codec_urn)

                if not codec_class:
                    self._clear()
                    self._error = Alert('Could not find codec "' + self._codec_urn + '". Check to see that it is installed and enabled.',
                        model_obj=self.model_obj, icon=AlertIcon.Error)
                    self.show_alert(self._error)
                    return

                try:
                    codec = codec_class(**self._start_definition)
                except:
                    self._clear()
                    self._error = Alert('Error while creating codec instance',
                        model_obj=self.model_obj, icon=AlertIcon.Error, exc_info=True)
                    self.show_alert(self._error)
                    return

                try:
                    self.decoder = codec.create_decoder(self._pktstream, self._offset, self._length)
                    self.codec = codec
                except:
                    self._clear()
                    self._error = Alert('Error while creating decoder',
                        model_obj=self.model_obj, icon=AlertIcon.Error, exc_info=True)
                    self.show_alert(self._error)
                    return
            else:
                # Try to find one that handles the format
                codecs = PluginManager.find_decoders(self._format_urn)

                if not len(codecs):
                    self._clear()
                    self._error = Alert('No codecs found to handle format "' + self._format_urn + '".',
                        model_obj=self.model_obj, icon=AlertIcon.Error)
                    self.show_alert(self._error)
                    return

                for codec_class in codecs:
                    codec = None

                    try:
                        codec = codec_class()
                    except:
                        # BUG FIX: this logged codec.__name__, but codec is
                        # still None here; report the class that failed so the
                        # loop can move on instead of raising AttributeError
                        _log.warning('Error while creating instance of codec {0}', codec_class.__name__, exc_info=True)
                        continue

                    try:
                        self.decoder = codec.create_decoder(self._pktstream, self._offset, self._length)
                        self.codec = codec
                        # BUG FIX: stop at the first codec that works; without
                        # this break, a later codec silently replaced it
                        break
                    except:
                        _log.warning('Error while trying codec {0}', codec.urn, exc_info=True)

                if not self.decoder:
                    self._clear()
                    self._error = Alert('No codecs found to handle format "' + self._format_urn + '". All codecs that were tried failed. See log for details.',
                        model_obj=self.model_obj, icon=AlertIcon.Error)
                    self.show_alert(self._error)
                    return

            self.follow_alerts(self.decoder)

            # TODO: If we set the defined_range here, can we skip giving it to the codec? (Answer: probably)
            #   What do offset and length mean for codecs that don't start at zero? (Answer: codecs probably shouldn't start at anything but zero)
            self.set_format(None)
            self.set_base_filter(self.decoder, new_range=self.decoder.defined_range)
            self.set_format(self.decoder.format)
        except:
            _log.warning('Error while finding codec for format "' + self._format_urn + '"', exc_info=True)
            self._clear()
            self._error = Alert('Error while finding codec for format "' + self._format_urn + '"', model_obj=self.model_obj, icon=AlertIcon.Error, exc_info=True)
            self.show_alert(self._error)
class VideoDecoderConnector(_DecoderConnector, VideoStream):
    # Video flavor of the decoder connector.  The VideoStream mixin is
    # initialized first because _DecoderConnector.__init__ calls connect(),
    # which uses the stream's set_format()/set_base_filter() methods.
    def __init__(self, *args, **kw):
        VideoStream.__init__(self)
        _DecoderConnector.__init__(self, *args, **kw)
class AudioDecoderConnector(_DecoderConnector, AudioStream):
    # Audio flavor of the decoder connector; see VideoDecoderConnector for
    # why the stream mixin must be initialized before the connector.
    def __init__(self, *args, **kw):
        AudioStream.__init__(self)
        _DecoderConnector.__init__(self, *args, **kw)
| gpl-3.0 |
halaszk/universal7420 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested levels on access."""
    nested = defaultdict(autodict)
    return nested
# Per-event registries populated by the define_* callbacks below; autodict
# lets callers index arbitrarily deep without pre-creating levels.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Register the delimiter used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Map one numeric flag bit to its printable name.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one numeric value to its symbolic (exact-match) name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render the bitmask *value* of a flag field as a string of flag names
    joined by the field's registered delimiter."""
    string = ""
    field = flag_fields[event_name][field_name]

    if field:
        print_delim = 0
        # BUG FIX (py3 compat): dict.keys() is a view on Python 3 and has no
        # .sort(); sorted() works on both Python 2 and 3.
        for idx in sorted(field['values'].keys()):
            # A zero value with a registered zero flag prints that name alone
            if not value and not idx:
                string += field['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and field['delim']:
                    string += " " + field['delim'] + " "
                string += field['values'][idx]
                print_delim = 1
                # Clear the handled bits so leftovers don't re-match
                value &= ~idx

    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for *value* in the given event
    field, or '' if no exact match is registered."""
    string = ""
    field = symbolic_fields[event_name][field_name]

    if field:
        # BUG FIX (py3 compat): dict.keys() is a view on Python 3 and has no
        # .sort(); sorted() works on both Python 2 and 3.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string = field['values'][idx]
                break
            if (value == idx):
                string = field['values'][idx]
                break

    return string
# Bit values of the common trace 'flags' field; rendered by
# trace_flag_str() below.
trace_flags = { 0x00: "NONE", \
    0x01: "IRQS_OFF", \
    0x02: "IRQS_NOSUPPORT", \
    0x04: "NEED_RESCHED", \
    0x08: "HARDIRQ", \
    0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Render the trace-flags bitmask *value* as flag names joined by ' | '
    (or 'NONE' when the value is zero)."""
    names = []
    remaining = value

    for bit, label in trace_flags.items():
        # Zero value matched against the registered zero flag prints NONE
        if not remaining and not bit:
            names.append("NONE")
            break
        if bit and (remaining & bit) == bit:
            names.append(label)
            # Clear handled bits so they aren't matched twice
            remaining &= ~bit

    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its short label, or 'Unknown' for
    any code not in the table."""
    labels = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return labels.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every perf sample."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full timestamp in nanoseconds."""
        nanos_per_sec = 10 ** 9
        return self.secs * nanos_per_sec + self.nsecs

    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        micros = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, micros)
| gpl-2.0 |
ridfrustum/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/gdal/prototypes/geom.py | 315 | 4821 | import re
from datetime import date
from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, string_output, void_output
### Generation routines specific to this module ###
def env_func(f, argtypes):
    "For getting OGREnvelopes."
    f.argtypes = argtypes
    f.restype = None
    # check_envelope (from prototypes.errcheck) post-processes each call;
    # presumably it extracts the envelope from the by-reference argument.
    f.errcheck = check_envelope
    return f
def pnt_func(f):
    "For accessing point information."
    # Point accessors take (geometry handle, coordinate index) and return
    # a double.
    return double_output(f, [c_void_p, c_int])
def topology_func(f):
    "For topology functions: (geom, geom) -> OGR boolean (int)."
    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    # BUG FIX: this was `f.errchck`, a typo that just set an unused
    # attribute -- ctypes only honors `errcheck`, so check_bool never ran.
    f.errcheck = check_bool
    return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines, if supported.
if GEOJSON:
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
from_json = False
to_json = False
to_kml = False
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
| gpl-3.0 |
zhuwenping/python-for-android | python-build/python-libs/gdata/build/lib/gdata/experimental_oauth.py | 133 | 4540 | #!/usr/bin/env python
import binascii
import urllib
import time
import random
import hmac
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
OAUTH_VERSION = '1.0'
def get_normalized_http_url(http_request):
    """Return the request URL with any query string removed.

    Used as the URI element of the OAuth signature base string.
    """
    full_url = http_request.uri.to_string()
    # BUG FIX: str.find returns -1 when there is no '?', so the original
    # full_url[:full_url.find('?')] silently chopped the last character off
    # query-less URLs; partition handles both cases correctly.
    return full_url.partition('?')[0]
def escape(s):
    # Percent-encode *s* for OAuth, leaving '~' unescaped (RFC 3986
    # unreserved).  Python 2 only: `unicode` and `urllib.quote` do not
    # exist on Python 3 (urllib.parse.quote there).
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    return urllib.quote(s, safe='~')
def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits.

    `random` is not cryptographically strong; an OAuth nonce only needs to
    be unique per timestamp, so this is acceptable here.
    """
    # range() instead of xrange(): works on both Python 2 and 3.
    return ''.join([str(random.randint(0, 9)) for i in range(length)])
def timestamp():
    """Current Unix time, truncated to whole seconds."""
    now = time.time()
    return int(now)
def get_normalized_parameters(http_request, oauth_params):
    """Return the normalized '&'-joined key=value string of all request and
    OAuth parameters, ready for the signature base string."""
    params = oauth_params.copy()
    params.update(http_request.uri.query)
    # The signature itself is never part of the signed text.
    if 'oauth_signature' in params:
        del params['oauth_signature']
    # sort lexicographically, first after key, then after value.
    # BUG FIX (py3 compat): dict.items() is a view on Python 3 and has no
    # .sort(); sorted() works on both Python 2 and 3.
    pairs = sorted(params.items())
    # combine key value pairs in string and escape
    x = '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in pairs])
    return x
def build_signature_base_string(http_request, oauth_params):
    """Return the OAuth signature base string: METHOD&URL&PARAMS, with each
    element percent-escaped before joining."""
    # BUG FIX: str.join takes a single iterable; the original passed three
    # separate arguments, which raises TypeError on every call.
    return '&'.join((
        escape(http_request.method.upper()),
        escape(get_normalized_http_url(http_request)),
        escape(get_normalized_parameters(http_request, oauth_params))))
def build_hmac_signature(self, http_request, oauth_params, consumer_secret,
    token_secret):
    # NOTE(review): *self* is unused -- this looks like a method pasted to
    # module level; confirm against callers before removing it.
    # Returns the base64 HMAC-SHA1 signature (without the trailing newline
    # b2a_base64 appends) over the OAuth signature base string.
    raw = build_signature_base_string(http_request, oauth_params)
    key = None
    hashed = None
    # Per OAuth, the HMAC key is consumer_secret&token_secret, with the
    # token part left empty when there is no token yet.
    if token_secret:
        key = '%s&%s' % (escape(consumer_secret), escape(token_secret))
    else:
        key = '%s&' % escape(consumer_secret)
    try:
        import hashlib
        hashed = hmac.new(key, raw, hashlib.sha1)
    except ImportError:
        # Fallback for ancient Pythons without hashlib: deprecated sha module
        import sha
        hashed = hmac.new(key, raw, sha)
    # Calculate the digest base 64.
    return binascii.b2a_base64(hashed.digest())[:-1]
#?
def build_rsa_signature(self, http_request, oauth_params, cert):
    # NOTE(review): *self* is unused here too; see build_hmac_signature.
    # Signs the OAuth base string with the private key from *cert* and
    # returns it base64-encoded without the trailing newline.
    base_string = build_signature_base_string(http_request, oauth_params)
    # Pull the private key from the certificate
    privatekey = keyfactory.parsePrivateKey(cert)
    # Sign using the key
    signed = privatekey.hashAndSign(base_string)
    return binascii.b2a_base64(signed)[:-1]
#?
def check_signature(self, http_request, oauth_params, cert, signature):
    """Verify *signature* against the request's signature base string using
    the public key in *cert*; returns the verifier's boolean result.

    NOTE(review): *self* is unused -- looks like a method pasted to module
    level, same as the other signature helpers.
    """
    # BUG FIX: base64 was never imported anywhere in this module, so this
    # function always raised NameError; import it locally.
    import base64
    decoded_sig = base64.b64decode(signature)
    base_string = build_signature_base_string(http_request, oauth_params)
    # Pull the public key from the certificate
    publickey = keyfactory.parsePEMKey(cert, public=True)
    # Check the signature
    return publickey.hashAndVerify(decoded_sig, base_string)
def to_auth_header(oauth_params):
    """Format *oauth_params* as an OAuth Authorization header value."""
    # Create a tuple containing key value pairs with an = between.
    # Example: oauth_token="ad180jjd733klru7"
    # BUG FIX (py3 compat): dict.iteritems() does not exist on Python 3;
    # .items() behaves identically here on both versions.
    pairs = ('%s="%s"' % (escape(k), escape(v)) for k, v in oauth_params.items())
    # Place a , between each pair and return as an OAuth auth header value.
    return 'OAuth %s' % (','.join(pairs))
TEST_PUBLIC_CERT = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
TEST_PRIVATE_CERT = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
| apache-2.0 |
AICP/kernel_motorola_msm8992 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1     # Basic PEBS event
EVTYPE_PEBS_LL = 2  # PEBS event with load latency info
EVTYPE_IBS = 3

#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    """Factory: pick the event class from the raw buffer's size (see the
    size note above) and return an instance of it."""
    if (len(raw_buf) == 144):
        event = PebsEvent(name, comm, dso, symbol, raw_buf)
    elif (len(raw_buf) == 176):
        event = PebsNHM(name, comm, dso, symbol, raw_buf)
    else:
        event = PerfEvent(name, comm, dso, symbol, raw_buf)
    return event

class PerfEvent(object):
    """Base class for all perf event samples."""

    # Running count of every PerfEvent (including subclasses) created
    event_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name
        self.comm = comm
        self.dso = dso
        self.symbol = symbol
        self.raw_buf = raw_buf
        self.ev_type = ev_type
        PerfEvent.event_num += 1

    def show(self):
        """Print a one-line summary of the sample."""
        # BUG FIX (py3 compat): was a Python-2-only `print` statement; the
        # parenthesized print() form behaves identically on Python 2 (one
        # parenthesized expression) and Python 3.
        print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso))
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    # Running count of PEBS events created (including PebsNHM subclasses)
    pebs_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        # First 80 bytes: ten native-endian unsigned 64-bit fields --
        # EFLAGS, linear IP, then the general-purpose registers in the
        # order unpacked below.
        tmp_buf=raw_buf[0:80]
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
#	Status: records the IA32_PERF_GLOBAL_STATUS register value
#	DLA: Data Linear Address (EIP)
#	DSE: Data Source Encoding, where the latency happens, hit or miss
#	     in L1/L2/L3 or IO operations
#	LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
    # Running count of Nehalem/Westmere load-latency PEBS events created
    pebs_nhm_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        # Bytes 144-176: four native-endian unsigned 64-bit words holding
        # the load-latency info described in the banner above.
        tmp_buf=raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status
        self.dla = dla
        self.dse = dse
        self.lat = lat
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
| gpl-2.0 |
alqfahad/odoo | addons/mail/res_users.py | 314 | 10337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp import api
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import openerp
class res_users(osv.Model):
    """ Update of res.users class
        - add a preference about sending emails about notifications
        - make a new user follow itself
        - add a welcome message
        - add suggestion preference
    """
    _name = 'res.users'
    _inherit = ['res.users']
    # Delegation inheritance: every user owns a mail.alias record reached
    # through alias_id.
    _inherits = {'mail.alias': 'alias_id'}

    _columns = {
        'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
            help="Email address internally associated with this user. Incoming "\
                 "emails will appear in the user's notifications.", copy=False, auto_join=True),
        'display_groups_suggestions': fields.boolean("Display Groups Suggestions"),
    }

    _defaults = {
        'display_groups_suggestions': True,
    }

    def __init__(self, pool, cr):
        """ Override of __init__ to add access rights on notification_email_send
            and alias fields. Access rights are disabled by default, but allowed
            on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
        """
        init_res = super(res_users, self).__init__(pool, cr)
        # duplicate list to avoid modifying the original reference
        self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
        self.SELF_WRITEABLE_FIELDS.extend(['notify_email', 'display_groups_suggestions'])
        # duplicate list to avoid modifying the original reference
        self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
        self.SELF_READABLE_FIELDS.extend(['notify_email', 'alias_domain', 'alias_name', 'display_groups_suggestions'])
        return init_res

    def _auto_init(self, cr, context=None):
        """ Installation hook: aliases, partner following themselves """
        # create aliases for all users and avoid constraint errors
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(res_users, self)._auto_init,
            self._name, self._columns['alias_id'], 'login', alias_force_key='id', context=context)

    def create(self, cr, uid, data, context=None):
        # Refuse creation without a login and point the user at the proper
        # configuration screen instead.
        if not data.get('login', False):
            model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'action_res_users')
            msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
            raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        if context is None:
            context = {}
        # Tell the alias machinery which model owns the alias being created
        create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name)
        user_id = super(res_users, self).create(cr, uid, data, context=create_context)
        user = self.browse(cr, uid, user_id, context=context)
        # Point the fresh alias back at its owning user record
        self.pool.get('mail.alias').write(cr, SUPERUSER_ID, [user.alias_id.id], {"alias_force_thread_id": user_id, "alias_parent_thread_id": user_id}, context)
        # create a welcome message
        self._create_welcome_message(cr, uid, user, context=context)
        return user_id

    def copy_data(self, *args, **kwargs):
        # Aliases must stay unique, so a duplicated user reuses its login
        # as the alias name instead of the source user's alias.
        data = super(res_users, self).copy_data(*args, **kwargs)
        if data and data.get('alias_name'):
            data['alias_name'] = data['login']
        return data

    def _create_welcome_message(self, cr, uid, user, context=None):
        # Only employees (base.group_user) get a public "joined" message
        if not self.has_group(cr, uid, 'base.group_user'):
            return False
        company_name = user.company_id.name if user.company_id else ''
        body = _('%s has joined the %s network.') % (user.name, company_name)
        # TODO change SUPERUSER_ID into user.id but catch errors
        return self.pool.get('res.partner').message_post(cr, SUPERUSER_ID, [user.partner_id.id],
            body=body, context=context)

    def unlink(self, cr, uid, ids, context=None):
        # Cascade-delete mail aliases as well, as they should not exist without the user.
        alias_pool = self.pool.get('mail.alias')
        alias_ids = [user.alias_id.id for user in self.browse(cr, uid, ids, context=context) if user.alias_id]
        res = super(res_users, self).unlink(cr, uid, ids, context=context)
        alias_pool.unlink(cr, uid, alias_ids, context=context)
        return res

    def _message_post_get_pid(self, cr, uid, thread_id, context=None):
        # Resolve the partner behind the user whose wall is being posted on
        assert thread_id, "res.users does not support posting global messages"
        if context and 'thread_model' in context:
            context['thread_model'] = 'res.users'
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]
        return self.browse(cr, SUPERUSER_ID, thread_id).partner_id.id

    @api.cr_uid_ids_context
    def message_post(self, cr, uid, thread_id, context=None, **kwargs):
        """ Redirect the posting of message on res.users as a private discussion.
            This is done because when giving the context of Chatter on the
            various mailboxes, we do not have access to the current partner_id. """
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]
        current_pids = []
        partner_ids = kwargs.get('partner_ids', [])
        user_pid = self._message_post_get_pid(cr, uid, thread_id, context=context)
        # partner_ids may mix ORM link commands -- (4, id) adds one partner,
        # (6, 0, [ids]) replaces the set -- with bare partner ids
        for partner_id in partner_ids:
            if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
                current_pids.append(partner_id[1])
            elif isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
                current_pids.append(partner_id[2])
            elif isinstance(partner_id, (int, long)):
                current_pids.append(partner_id)
        # Make sure the target user's partner is among the recipients
        if user_pid not in current_pids:
            partner_ids.append(user_pid)
        kwargs['partner_ids'] = partner_ids
        if context and context.get('thread_model') == 'res.partner':
            return self.pool['res.partner'].message_post(cr, uid, user_pid, **kwargs)
        return self.pool['mail.thread'].message_post(cr, uid, uid, **kwargs)

    def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
        # Incoming email updates are meaningless for users; accept silently
        return True

    def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
        # Users cannot be followed directly; accept silently
        return True

    def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
        # Delegate email->partner resolution to the generic mail.thread code
        return self.pool.get('mail.thread').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)

    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        # No suggested recipients when chatting on a user record
        return dict((res_id, list()) for res_id in ids)

    def stop_showing_groups_suggestions(self, cr, uid, user_id, context=None):
        """Update display_groups_suggestions value to False"""
        if context is None:
            context = {}
        self.write(cr, uid, user_id, {"display_groups_suggestions": False}, context)
class res_users_mail_group(osv.Model):
    """ Update of res.users class
        - if adding groups to an user, check mail.groups linked to this user
          group, and the user. This is done by overriding the write method.
    """
    _name = 'res.users'
    _inherit = ['res.users']

    # FP Note: to improve, post processing may be better ?
    def write(self, cr, uid, ids, vals, context=None):
        write_res = super(res_users_mail_group, self).write(cr, uid, ids, vals, context=context)
        if vals.get('groups_id'):
            # form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
            # Collect group ids from link commands: (4, id) adds one group,
            # (6, 0, [ids]) replaces the whole set.
            user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
            user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
            # Auto-subscribe the written users to every mail.group tied to
            # the security groups they just gained.
            mail_group_obj = self.pool.get('mail.group')
            mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', user_group_ids)], context=context)
            mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, ids, context=context)
        return write_res
class res_groups_mail_group(osv.Model):
    """ Update of res.groups class
        - if adding users from a group, check mail.groups linked to this user
          group and subscribe them. This is done by overriding the write method.
    """
    _name = 'res.groups'
    _inherit = 'res.groups'

    # FP Note: to improve, post processing after the super may be better
    def write(self, cr, uid, ids, vals, context=None):
        write_res = super(res_groups_mail_group, self).write(cr, uid, ids, vals, context=context)
        if vals.get('users'):
            # form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
            # Collect user ids from link commands: (4, id) adds one user,
            # (6, 0, [ids]) replaces the whole set.
            user_ids = [command[1] for command in vals['users'] if command[0] == 4]
            user_ids += [id for command in vals['users'] if command[0] == 6 for id in command[2]]
            # Auto-subscribe those users to every mail.group tied to the
            # security groups being written.
            mail_group_obj = self.pool.get('mail.group')
            mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', ids)], context=context)
            mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, user_ids, context=context)
        return write_res
| agpl-3.0 |
cjhak/b2share | invenio/testsuite/test_legacy_webdoc.py | 13 | 6522 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebDoc module unit tests."""
__revision__ = "$Id$"
from flask import current_app
from invenio.base.globals import cfg
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
gettext_set_language = lazy_import('invenio.base.i18n:gettext_set_language')
transform = lazy_import('invenio.legacy.webdoc.api:transform')
class WebDocLanguageTest(InvenioTestCase):
    """Check that WebDoc correctly supports <lang> translation
    directives and _()_ syntax"""

    def setUp(self):
        # Remember the configured languages so tearDown can restore them.
        self.__saved_langs = current_app.config['CFG_SITE_LANGS']
        current_app.config['CFG_SITE_LANGS'] = ['de', 'en', 'fr']

    def tearDown(self):
        current_app.config['CFG_SITE_LANGS'] = self.__saved_langs

    def test_language_filtering(self):
        """webdoc - language filtering"""
        output = transform('''
        <strong>
        <lang>
        <python>{}</python>
        <en><red>Book</red></en>
        <fr><yellow>Livre</yellow></fr>
        <de><blue>Buch</blue></de>
        </lang>
        </strong>
        ''', languages=['de'])
        lang_code, markup = output[0][0], output[0][1]
        # Only the requested language (German) survives the filtering.
        self.assertEqual(lang_code, 'de')
        self.assert_('<blue>Buch</blue>' in markup)
        # English and French content must be dropped entirely.
        self.assert_('Livre' not in markup)
        self.assert_('Book' not in markup)
        # <python> is not a language tag, so its content passes through.
        self.assert_('<python>{}</python' in markup)

    def test_string_translation(self):
        """webdoc - string translation"""
        site_lang = cfg['CFG_SITE_LANG']
        _ = gettext_set_language(site_lang)
        output = transform('my_string: _(Search)_ (end)', languages=[site_lang])
        # _(...)_ markers are replaced by the gettext translation.
        self.assertEqual(output[0][1], 'my_string: %s (end)' % _("Search"))
class WebDocPartsTest(InvenioTestCase):
    """Check that WebDoc correctly returns values for the different
    parts of webdoc files"""

    def setUp(self):
        # Remember the configured languages so tearDown can restore them.
        self.__saved_langs = current_app.config['CFG_SITE_LANGS']
        current_app.config['CFG_SITE_LANGS'] = ['de', 'en', 'fr']

    def tearDown(self):
        current_app.config['CFG_SITE_LANGS'] = self.__saved_langs

    def test_parts(self):
        """webdoc - retrieving parts of webdoc file (title, navtrail, etc)"""
        from invenio.config import CFG_SITE_URL
        site_lang = cfg['CFG_SITE_LANG']
        _ = gettext_set_language(site_lang)
        output = transform('''
        <!-- WebDoc-Page-Title: _(Help Central)_ -->
        <!-- WebDoc-Page-Navtrail: <a class="navtrail" href="<CFG_SITE_URL>/help/hacking">Hacking Invenio</a> > <a class="navtrail" href="webstyle-internals">WebStyle Internals</a> -->
        <!-- WebDoc-Page-Revision: $Id: help-central.webdoc,v 1.5 2008/05/26 12:52:41 jerome Exp $ -->
        <!-- WebDoc-Page-Description: A description -->''',
                           languages=[site_lang])
        page = output[0]
        # Title is translated.
        self.assertEqual(page[2], _("Help Central"))
        # No keywords declared in the sample above.
        self.assertEqual(page[3], None)
        # Navtrail has <CFG_SITE_URL> substituted.
        self.assertEqual(page[4], '<a class="navtrail" href="%s/help/hacking">Hacking Invenio</a> > <a class="navtrail" href="webstyle-internals">WebStyle Internals</a>' % CFG_SITE_URL)
        # Revision keeps only the date & time portion of the CVS $Id$.
        self.assertEqual(page[5], '2008-05-26 12:52:41')
        # Description is returned verbatim.
        self.assertEqual(page[6], 'A description')
class WebDocVariableReplacementTest(InvenioTestCase):
    """Check that WebDoc correctly replaces variables with their
    values"""

    def setUp(self):
        # Remember the configured languages so tearDown can restore them.
        self.__saved_langs = current_app.config['CFG_SITE_LANGS']
        current_app.config['CFG_SITE_LANGS'] = ['de', 'en', 'fr']

    def tearDown(self):
        current_app.config['CFG_SITE_LANGS'] = self.__saved_langs

    def test_CFG_SITE_URL_variable_replacement(self):
        """webdoc - replacing <CFG_SITE_URL> in webdoc files"""
        from invenio.config import CFG_SITE_URL
        site_lang = cfg['CFG_SITE_LANG']
        output = transform('<CFG_SITE_URL>', languages=[site_lang])
        self.assertEqual(output[0][1], CFG_SITE_URL)

    def test_language_tags_replacement(self):
        """webdoc - replacing <lang:link /> and <lang:current /> in
        webdoc files"""
        site_lang = cfg['CFG_SITE_LANG']
        # <lang:current /> expands to the active language code.
        output = transform('<lang:current />', languages=[site_lang])
        self.assertEqual(output[0][1], site_lang)
        # ?ln=.. is returned only if not cfg['CFG_SITE_LANG']
        output = transform('<lang:link />', languages=[site_lang])
        self.assertEqual(output[0][1], '?ln=%s' % site_lang)
        output = transform('<lang:link />', languages=['fr'])
        self.assertEqual(output[0][1], '?ln=fr')
class WebDocCommentsFiltering(InvenioTestCase):
    """Check that comments are correctly removed from webdoc files"""

    def setUp(self):
        # Remember the configured languages so tearDown can restore them.
        self.__saved_langs = current_app.config['CFG_SITE_LANGS']
        current_app.config['CFG_SITE_LANGS'] = ['de', 'en', 'fr']

    def tearDown(self):
        current_app.config['CFG_SITE_LANGS'] = self.__saved_langs

    def test_comments_filtering(self):
        """webdoc - removing comments"""
        # The whole input consists of comment lines, so nothing remains.
        stripped = transform('''# -*- coding: utf-8 -*-
        ## $Id$
        ##''',
                             languages=[cfg['CFG_SITE_LANG']])
        self.assertEqual(stripped[0][1], '')
# Aggregate all WebDoc test cases into the suite consumed by the test runner.
TEST_SUITE = make_test_suite(WebDocLanguageTest,
                             WebDocPartsTest,
                             WebDocVariableReplacementTest,
                             WebDocCommentsFiltering,)
# Allow running this module directly as a script.
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
maxrosan/NS-3-support-for-OBS | src/topology-read/bindings/modulegen__gcc_LP64.py | 2 | 151161 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that downgrades wrapper-generation failures to warnings."""
    def handle_error(self, wrapper, exception, traceback_):
        # Warn and report the error as handled so generation continues.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Install the tolerant handler so code generation continues past failures.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module wrapping the ns3 namespace."""
    topology_module = Module('ns.topology_read', cpp_namespace='::ns3')
    return topology_module
def register_types(module):
    """Register with pybindgen every C++ type this module exposes.

    Auto-generated: the registration order matters (base classes and
    outer classes must exist before dependents reference them via
    ``root_module[...]``), so the statements below must stay as-is.
    """
    root_module = module.get_root()

    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-list.h (module 'core'): ns3::AttributeList [class]
    module.add_class('AttributeList', import_from_module='ns.core')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## node-container.h (module 'network'): ns3::NodeContainer [class]
    module.add_class('NodeContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper [class]
    module.add_class('TopologyReaderHelper')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo [struct]
    module.add_class('AttributeInfo', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList [class]
    module.add_class('UnsafeAttributeList', import_from_module='ns.core')
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## topology-reader.h (module 'topology-read'): ns3::TopologyReader [class]
    module.add_class('TopologyReader', parent=root_module['ns3::Object'])
    ## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link [class]
    module.add_class('Link', outer_class=root_module['ns3::TopologyReader'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader [class]
    module.add_class('InetTopologyReader', parent=root_module['ns3::TopologyReader'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader [class]
    module.add_class('OrbisTopologyReader', parent=root_module['ns3::TopologyReader'])
    ## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader [class]
    module.add_class('RocketfuelTopologyReader', parent=root_module['ns3::TopologyReader'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_container('std::map< std::string, std::string >', ('std::string', 'std::string'), container_type='map')

    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none are wrapped yet)."""
    root_module = module.get_root()
def register_methods(root_module):
    """Register the method wrappers of every class added by register_types.

    Auto-generated: each helper below attaches constructors, operators and
    member-function wrappers to the pybindgen class object it is given.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeList_methods(root_module, root_module['ns3::AttributeList'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TopologyReaderHelper_methods(root_module, root_module['ns3::TopologyReaderHelper'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInfo_methods(root_module, root_module['ns3::TypeId::AttributeInfo'])
    register_Ns3UnsafeAttributeList_methods(root_module, root_module['ns3::UnsafeAttributeList'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TopologyReader_methods(root_module, root_module['ns3::TopologyReader'])
    register_Ns3TopologyReaderLink_methods(root_module, root_module['ns3::TopologyReader::Link'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3InetTopologyReader_methods(root_module, root_module['ns3::InetTopologyReader'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3OrbisTopologyReader_methods(root_module, root_module['ns3::OrbisTopologyReader'])
    register_Ns3RocketfuelTopologyReader_methods(root_module, root_module['ns3::RocketfuelTopologyReader'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Attach comparison operators, constructors and member-function
    wrappers for ns3::Address to its pybindgen class object.

    Auto-generated; each ## comment cites the C++ declaration being wrapped.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
def register_Ns3AttributeList_methods(root_module, cls):
    """Attach constructors and member-function wrappers for
    ns3::AttributeList to its pybindgen class object.

    Auto-generated; each ## comment cites the C++ declaration being wrapped.
    """
    ## attribute-list.h (module 'core'): ns3::AttributeList::AttributeList() [constructor]
    cls.add_constructor([])
    ## attribute-list.h (module 'core'): ns3::AttributeList::AttributeList(ns3::AttributeList const & o) [copy constructor]
    cls.add_constructor([param('ns3::AttributeList const &', 'o')])
    ## attribute-list.h (module 'core'): bool ns3::AttributeList::DeserializeFromString(std::string value) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value')])
    ## attribute-list.h (module 'core'): static ns3::AttributeList * ns3::AttributeList::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::AttributeList *',
                   [],
                   is_static=True)
    ## attribute-list.h (module 'core'): void ns3::AttributeList::Reset() [member function]
    cls.add_method('Reset',
                   'void',
                   [])
    ## attribute-list.h (module 'core'): std::string ns3::AttributeList::SerializeToString() const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [],
                   is_const=True)
    ## attribute-list.h (module 'core'): void ns3::AttributeList::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## attribute-list.h (module 'core'): bool ns3::AttributeList::SetFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## attribute-list.h (module 'core'): void ns3::AttributeList::SetWithTid(ns3::TypeId tid, std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetWithTid',
                   'void',
                   [param('ns3::TypeId', 'tid'), param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Attach constructors and member-function wrappers for
    ns3::CallbackBase to its pybindgen class object.

    Auto-generated; each ## comment cites the C++ declaration being wrapped.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Bind ns3::Ipv4Address (ipv4-address.h, module 'network') onto *cls*."""
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from raw uint32_t, from dotted-quad string.
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'address')])
    cls.add_constructor([param('char const *', 'address')])
    # Member and static functions.
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Bind ns3::Ipv4Mask (ipv4-address.h, module 'network') onto *cls*."""
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from raw uint32_t, from string form.
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'mask')])
    cls.add_constructor([param('char const *', 'mask')])
    # Member and static functions.
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Bind ns3::Ipv6Address (ipv6-address.h, module 'network') onto *cls*."""
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from string, from raw bytes, copy, from pointer.
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'address')])
    cls.add_constructor([param('uint8_t *', 'address')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    # Member and static functions.
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAny', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Bind ns3::Ipv6Prefix (ipv6-address.h, module 'network') onto *cls*."""
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, byte buffer, string, prefix length, copy, pointer.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t *', 'prefix')])
    cls.add_constructor([param('char const *', 'prefix')])
    cls.add_constructor([param('uint8_t', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    # Member and static functions.
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Bind ns3::NodeContainer (node-container.h, module 'network') onto *cls*."""
    # Constructors: copy, default, single node (by pointer or by name), and
    # concatenations of two through five existing containers.
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_constructor([param('std::string', 'nodeName')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    # Add overloads: another container, a node pointer, or a node name.
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    # Iteration, creation, and element access.
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Bind ns3::ObjectBase (object-base.h, module 'core') onto *cls*."""
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Attribute access.
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    # Type identification; GetInstanceTypeId is pure virtual on the C++ side.
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Attribute mutation.
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Trace source (dis)connection, with and without context strings.
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # Protected construction hooks.
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeList const &', 'attributes')], visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind ns3::ObjectDeleter (object.h, module 'core') onto *cls*."""
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # Static deletion helper used by the reference-counting machinery.
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind the SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation
    (simple-ref-count.h, module 'core') onto *cls*."""
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # Static cleanup entry point.
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Bind ns3::TagBuffer (tag-buffer.h, module 'network') onto *cls*."""
    # Constructors: copy and [start, end) byte-range.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    # Read side: raw bytes plus fixed-width scalar readers.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ReadDouble', 'double', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # Write side: raw bytes plus fixed-width scalar writers.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return
def register_Ns3TopologyReaderHelper_methods(root_module, cls):
    """Bind ns3::TopologyReaderHelper (topology-reader-helper.h, module
    'topology-read') onto *cls*."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::TopologyReaderHelper const &', 'arg0')])
    cls.add_constructor([])
    # Accessor for the configured reader, plus file name/type setters.
    cls.add_method('GetTopologyReader', 'ns3::Ptr< ns3::TopologyReader >', [])
    cls.add_method('SetFileName', 'void', [param('std::string const', 'fileName')])
    cls.add_method('SetFileType', 'void', [param('std::string const', 'fileType')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::TypeId on *cls*.

    Registers comparison/stream operators, constructors, and member
    functions.  Auto-generated by the pybindgen API scanner; the
    registration order of overloads is significant and should not be
    changed by hand.
    """
    # Comparison and output-stream operators for ns3::TypeId.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeAccessor const> ns3::TypeId::GetAttributeAccessor(uint32_t i) const [member function]
    cls.add_method('GetAttributeAccessor', 
                   'ns3::Ptr< ns3::AttributeAccessor const >', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::TypeId::GetAttributeChecker(uint32_t i) const [member function]
    cls.add_method('GetAttributeChecker', 
                   'ns3::Ptr< ns3::AttributeChecker const >', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeFlags(uint32_t i) const [member function]
    cls.add_method('GetAttributeFlags', 
                   'uint32_t', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeHelp(uint32_t i) const [member function]
    cls.add_method('GetAttributeHelp', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue const> ns3::TypeId::GetAttributeInitialValue(uint32_t i) const [member function]
    cls.add_method('GetAttributeInitialValue', 
                   'ns3::Ptr< ns3::AttributeValue const >', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeName(uint32_t i) const [member function]
    cls.add_method('GetAttributeName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::GetTraceSourceAccessor(uint32_t i) const [member function]
    cls.add_method('GetTraceSourceAccessor', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetTraceSourceHelp(uint32_t i) const [member function]
    cls.add_method('GetTraceSourceHelp', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetTraceSourceName(uint32_t i) const [member function]
    cls.add_method('GetTraceSourceName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupAttributeByFullName(std::string fullName, ns3::TypeId::AttributeInfo * info) [member function]
    cls.add_method('LookupAttributeByFullName', 
                   'bool', 
                   [param('std::string', 'fullName'), param('ns3::TypeId::AttributeInfo *', 'info')], 
                   is_static=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInfo * info) const [member function]
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInfo *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInfo_methods(root_module, cls):
    """Register pybindgen wrappers for the nested ns3::TypeId::AttributeInfo
    struct on *cls*: constructors plus its public instance attributes.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::AttributeInfo() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::AttributeInfo(ns3::TypeId::AttributeInfo const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInfo const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3UnsafeAttributeList_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::UnsafeAttributeList on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList::UnsafeAttributeList() [constructor]
    cls.add_constructor([])
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList::UnsafeAttributeList(ns3::UnsafeAttributeList const & o) [copy constructor]
    cls.add_constructor([param('ns3::UnsafeAttributeList const &', 'o')])
    ## attribute-list.h (module 'core'): ns3::AttributeList ns3::UnsafeAttributeList::GetSafe(std::string name) const [member function]
    cls.add_method('GetSafe', 
                   'ns3::AttributeList', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## attribute-list.h (module 'core'): void ns3::UnsafeAttributeList::Set(std::string name, ns3::AttributeValue const & param) [member function]
    cls.add_method('Set', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'param')])
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register pybindgen wrappers for the ns3::empty placeholder type on
    *cls* (default and copy constructors only).

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::int64x64_t on *cls*.

    Registers the arithmetic/comparison operator overloads (one per C++
    right-hand-side numeric type), constructors, and member functions.
    Auto-generated by the pybindgen API scanner; the registration order of
    overloads is significant and should not be changed by hand.
    """
    # operator* overloads, one per supported right-hand operand type.
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    # operator+ overloads.
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    # operator- overloads (binary), plus unary negation.
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    # operator/ overloads.
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    # Comparison, in-place arithmetic, and stream-output operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 
                   'double', 
                   [], 
                   is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 
                   'uint64_t', 
                   [], 
                   is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 
                   'ns3::int64x64_t', 
                   [param('uint64_t', 'v')], 
                   is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 
                   'void', 
                   [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Object on *cls*, including its
    protected copy constructor and protected virtual hooks.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 
                   'void', 
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 
                   'void', 
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 
                   'ns3::Object::AggregateIterator', 
                   [], 
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start', 
                   'void', 
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], 
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register pybindgen wrappers for the nested
    ns3::Object::AggregateIterator class on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 
                   'bool', 
                   [], 
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 
                   'ns3::Ptr< ns3::Object const >', 
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register pybindgen wrappers for the SimpleRefCount template
    instantiation over ns3::AttributeAccessor on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register pybindgen wrappers for the SimpleRefCount template
    instantiation over ns3::AttributeChecker on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register pybindgen wrappers for the SimpleRefCount template
    instantiation over ns3::AttributeValue on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register pybindgen wrappers for the SimpleRefCount template
    instantiation over ns3::CallbackImplBase on *cls*.

    Auto-generated by the pybindgen API scanner; do not edit by hand.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Time on *cls*.

    Registers arithmetic/comparison operators, the numeric and string
    constructors, and conversion/query member functions.  Auto-generated
    by the pybindgen API scanner; the registration order of overloads is
    significant and should not be changed by hand.
    """
    # Arithmetic, comparison, in-place, and stream-output operators.
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare', 
                   'int', 
                   [param('ns3::Time const &', 'o')], 
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From', 
                   'ns3::Time', 
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], 
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From', 
                   'ns3::Time', 
                   [param('ns3::int64x64_t const &', 'value')], 
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble', 
                   'ns3::Time', 
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], 
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger', 
                   'ns3::Time', 
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], 
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 
                   'double', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 
                   'ns3::Time::Unit', 
                   [], 
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 
                   'double', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 
                   'int64_t', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 
                   'bool', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 
                   'bool', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 
                   'bool', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 
                   'bool', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 
                   'bool', 
                   [], 
                   is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 
                   'void', 
                   [param('ns3::Time::Unit', 'resolution')], 
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To', 
                   'ns3::int64x64_t', 
                   [param('ns3::Time::Unit', 'timeUnit')], 
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble', 
                   'double', 
                   [param('ns3::Time::Unit', 'timeUnit')], 
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger', 
                   'int64_t', 
                   [param('ns3::Time::Unit', 'timeUnit')], 
                   is_const=True)
    return
def register_Ns3TopologyReader_methods(root_module, cls):
    """Attach the ns3::TopologyReader bindings (topology-reader.h) to *cls*."""
    link_iter = 'std::_List_const_iterator< ns3::TopologyReader::Link >'
    # TopologyReader()
    cls.add_constructor([])
    # void AddLink(TopologyReader::Link link)
    cls.add_method('AddLink', 'void', [param('ns3::TopologyReader::Link', 'link')])
    # std::string GetFileName() const
    cls.add_method('GetFileName', 'std::string', [], is_const=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # const-iterator pair over the parsed link list, plus emptiness/size queries
    cls.add_method('LinksBegin', link_iter, [], is_const=True)
    cls.add_method('LinksEmpty', 'bool', [], is_const=True)
    cls.add_method('LinksEnd', link_iter, [], is_const=True)
    cls.add_method('LinksSize', 'int', [], is_const=True)
    # NodeContainer Read() — pure-virtual entry point implemented by subclasses
    cls.add_method('Read', 'ns3::NodeContainer', [], is_pure_virtual=True, is_virtual=True)
    # void SetFileName(std::string const fileName)
    cls.add_method('SetFileName', 'void', [param('std::string const', 'fileName')])
    return
def register_Ns3TopologyReaderLink_methods(root_module, cls):
    """Attach the ns3::TopologyReader::Link bindings (topology-reader.h) to *cls*."""
    attr_iter = 'std::_Rb_tree_const_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > > const, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >'
    # Link(Link const &) copy constructor
    cls.add_constructor([param('ns3::TopologyReader::Link const &', 'arg0')])
    # Link(Ptr<Node> fromPtr, std::string fromName, Ptr<Node> toPtr, std::string toName)
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'fromPtr'), param('std::string', 'fromName'), param('ns3::Ptr< ns3::Node >', 'toPtr'), param('std::string', 'toName')])
    # const-iterator pair over the link's string-keyed attribute map
    cls.add_method('AttributesBegin', attr_iter, [])
    cls.add_method('AttributesEnd', attr_iter, [])
    # std::string GetAttribute(std::string name)
    cls.add_method('GetAttribute', 'std::string', [param('std::string', 'name')])
    # bool GetAttributeFailSafe(std::string name, std::string & value)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('std::string &', 'value')])
    # endpoint accessors (node pointers and their textual names)
    cls.add_method('GetFromNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    cls.add_method('GetFromNodeName', 'std::string', [], is_const=True)
    cls.add_method('GetToNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    cls.add_method('GetToNodeName', 'std::string', [], is_const=True)
    # void SetAttribute(std::string name, std::string & value)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('std::string &', 'value')])
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Attach the ns3::AttributeAccessor bindings (attribute.h) to *cls*.

    The accessor is an abstract interface, so Get/Set and the capability
    queries are all registered as pure-virtual const members.
    """
    # AttributeAccessor(AttributeAccessor const &) / AttributeAccessor()
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # bool Get(ObjectBase const * object, AttributeValue & attribute) const
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool HasGetter() const / bool HasSetter() const
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Set(ObjectBase * object, AttributeValue const & value) const
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Attach the ns3::AttributeChecker bindings (attribute.h) to *cls*.

    Every member belongs to the abstract checker interface, hence the
    uniform pure-virtual const registration.
    """
    # AttributeChecker(AttributeChecker const &) / AttributeChecker()
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # bool Check(AttributeValue const & value) const
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Copy(AttributeValue const & source, AttributeValue & destination) const
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AttributeValue> Create() const
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # type-introspection helpers
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Attach the ns3::AttributeValue bindings (attribute.h) to *cls*."""
    # AttributeValue(AttributeValue const &) / AttributeValue()
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Attach the ns3::CallbackChecker bindings (callback.h) to *cls*."""
    # CallbackChecker() and its copy constructor — no other members to bind.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Attach the ns3::CallbackImplBase bindings (callback.h) to *cls*."""
    # CallbackImplBase() and its copy constructor
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # bool IsEqual(Ptr<CallbackImplBase const> other) const
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Attach the ns3::CallbackValue bindings (callback.h) to *cls*."""
    # copy / default / from-CallbackBase constructors
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(CallbackBase base)
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Attach the ns3::EmptyAttributeValue bindings (attribute.h) to *cls*.

    The serialization hooks are private in the C++ class, hence the
    visibility='private' markers on the virtual overrides.
    """
    # EmptyAttributeValue(EmptyAttributeValue const &) / EmptyAttributeValue()
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3InetTopologyReader_methods(root_module, cls):
    """Attach the ns3::InetTopologyReader bindings (inet-topology-reader.h) to *cls*."""
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # InetTopologyReader()
    cls.add_constructor([])
    # NodeContainer Read() — concrete override of TopologyReader::Read
    cls.add_method('Read', 'ns3::NodeContainer', [], is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Attach the ns3::Ipv4AddressChecker bindings (ipv4-address.h) to *cls*."""
    # Ipv4AddressChecker() and its copy constructor — no other members to bind.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Attach the ns3::Ipv4AddressValue bindings (ipv4-address.h) to *cls*."""
    # default / copy / from-Ipv4Address constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Ipv4Address Get() const
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(Ipv4Address const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Attach the ns3::Ipv4MaskChecker bindings (ipv4-address.h) to *cls*."""
    # Ipv4MaskChecker() and its copy constructor — no other members to bind.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Attach the ns3::Ipv4MaskValue bindings (ipv4-address.h) to *cls*."""
    # default / copy / from-Ipv4Mask constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Ipv4Mask Get() const
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(Ipv4Mask const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Attach the ns3::Ipv6AddressChecker bindings (ipv6-address.h) to *cls*."""
    # Ipv6AddressChecker() and its copy constructor — no other members to bind.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Attach the ns3::Ipv6AddressValue bindings (ipv6-address.h) to *cls*."""
    # default / copy / from-Ipv6Address constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Ipv6Address Get() const
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(Ipv6Address const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Attach the ns3::Ipv6PrefixChecker bindings (ipv6-address.h) to *cls*."""
    # Ipv6PrefixChecker() and its copy constructor — no other members to bind.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Attach the ns3::Ipv6PrefixValue bindings (ipv6-address.h) to *cls*."""
    # default / copy / from-Ipv6Prefix constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Ipv6Prefix Get() const
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(Ipv6Prefix const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Attach the ns3::NetDevice bindings (net-device.h) to *cls*.

    NetDevice is an abstract interface, so nearly every member is
    registered pure-virtual; the Callback parameter types are spelled
    out in full pybindgen form and hoisted into locals for readability.
    """
    link_change_cb = 'ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'
    promisc_cb = 'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'
    receive_cb = 'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'
    # NetDevice() / NetDevice(NetDevice const &)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    # void AddLinkChangeCallback(Callback<void> callback)
    cls.add_method('AddLinkChangeCallback', 'void', [param(link_change_cb, 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    # const queries for addressing and attachment
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetMulticast is overloaded on IPv4 and IPv6 group addresses
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # boolean capability / state predicates, all with identical registration flags
    for predicate in ('IsBridge', 'IsBroadcast', 'IsLinkUp', 'IsMulticast', 'IsPointToPoint', 'NeedsArp'):
        cls.add_method(predicate, 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Send(packet, dest, protocolNumber) / bool SendFrom(packet, source, dest, protocolNumber)
    cls.add_method('Send', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    # mutators
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    # packet reception callbacks (promiscuous and non-promiscuous)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param(promisc_cb, 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param(receive_cb, 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Attach the ns3::Node bindings (node.h) to *cls*."""
    protocol_handler_cb = 'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'
    # Node(Node const &) / Node() / Node(uint32_t systemId)
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'systemId')])
    # uint32_t AddApplication(Ptr<Application>) / uint32_t AddDevice(Ptr<NetDevice>)
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    # static bool ChecksumEnabled()
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    # const accessors for applications, devices and identifiers
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # protocol-handler (de)registration; 'promiscuous' defaults to false
    cls.add_method('RegisterProtocolHandler', 'void',
                   [param(protocol_handler_cb, 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    cls.add_method('UnregisterProtocolHandler', 'void', [param(protocol_handler_cb, 'handler')])
    # protected lifecycle hooks
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    # private device-attachment notification hook
    cls.add_method('NotifyDeviceAdded', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3OrbisTopologyReader_methods(root_module, cls):
## orbis-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::OrbisTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader::OrbisTopologyReader() [constructor]
cls.add_constructor([])
## orbis-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::OrbisTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3RocketfuelTopologyReader_methods(root_module, cls):
## rocketfuel-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::RocketfuelTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader::RocketfuelTopologyReader() [constructor]
cls.add_constructor([])
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::RocketfuelTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |
xinjiguaike/edx-platform | common/lib/symmath/symmath/formula.py | 66 | 25851 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Flexible python representation of a symbolic mathematical formula.
Acceptes Presentation MathML, Content MathML (and could also do OpenMath).
Provides sympy representation.
"""
#
# File: formula.py
# Date: 04-May-12 (creation)
# Author: I. Chuang <[email protected]>
#
import os
import string # pylint: disable=deprecated-module
import re
import logging
import operator
import requests
import sympy
from sympy.printing.latex import LatexPrinter
from sympy.printing.str import StrPrinter
from sympy import latex, sympify
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.state import Ket
from xml.sax.saxutils import unescape
import unicodedata
from lxml import etree
#import subprocess
from copy import deepcopy
log = logging.getLogger(__name__)
log.warning("Dark code. Needs review before enabling in prod.")
os.environ['PYTHONIOENCODING'] = 'utf-8'
#-----------------------------------------------------------------------------
class dot(sympy.operations.LatticeOp): # pylint: disable=invalid-name, no-member
"""my dot product"""
zero = sympy.Symbol('dotzero')
identity = sympy.Symbol('dotidentity')
def _print_dot(_self, expr):
"""Print statement used for LatexPrinter"""
return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1])
LatexPrinter._print_dot = _print_dot # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# unit vectors (for 8.02)
def _print_hat(_self, expr):
"""Print statement used for LatexPrinter"""
return '\\hat{%s}' % str(expr.args[0]).lower()
LatexPrinter._print_hat = _print_hat # pylint: disable=protected-access
StrPrinter._print_hat = _print_hat # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# helper routines
def to_latex(expr):
"""
Convert expression to latex mathjax format
"""
if expr is None:
return ''
expr_s = latex(expr)
expr_s = expr_s.replace(r'\XI', 'XI') # workaround for strange greek
# substitute back into latex form for scripts
# literally something of the form
# 'scriptN' becomes '\\mathcal{N}'
# note: can't use something akin to the _print_hat method above because we sometimes get 'script(N)__B' or more complicated terms
expr_s = re.sub(
r'script([a-zA-Z0-9]+)',
'\\mathcal{\\1}',
expr_s
)
#return '<math>%s{}{}</math>' % (xs[1:-1])
if expr_s[0] == '$':
return '[mathjax]%s[/mathjax]<br>' % (expr_s[1:-1]) # for sympy v6
return '[mathjax]%s[/mathjax]<br>' % (expr_s) # for sympy v7
def my_evalf(expr, chop=False):
"""
Enhanced sympy evalf to handle lists of expressions
and catch eval failures without dropping out.
"""
if isinstance(expr, list):
try:
return [x.evalf(chop=chop) for x in expr]
except:
return expr
try:
return expr.evalf(chop=chop)
except:
return expr
def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None):
"""
Version of sympify to import expression into sympy
"""
# make all lowercase real?
if symtab:
varset = symtab
else:
varset = {'p': sympy.Symbol('p'),
'g': sympy.Symbol('g'),
'e': sympy.E, # for exp
'i': sympy.I, # lowercase i is also sqrt(-1)
'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key"
'I': sympy.Symbol('I'), # otherwise it is sqrt(-1)
'N': sympy.Symbol('N'), # or it is some kind of sympy function
'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing
'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI
'hat': sympy.Function('hat'), # for unit vectors (8.02)
}
if do_qubit: # turn qubit(...) into Qubit instance
varset.update({'qubit': Qubit,
'Ket': Ket,
'dot': dot,
'bit': sympy.Function('bit'),
})
if abcsym: # consider all lowercase letters as real symbols, in the parsing
for letter in string.lowercase:
if letter in varset: # exclude those already done
continue
varset.update({letter: sympy.Symbol(letter, real=True)})
sexpr = sympify(expr, locals=varset)
if normphase: # remove overall phase if sexpr is a list
if isinstance(sexpr, list):
if sexpr[0].is_number:
ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0])
sexpr = [sympy.Mul(x, ophase) for x in sexpr]
def to_matrix(expr):
"""
Convert a list, or list of lists to a matrix.
"""
# if expr is a list of lists, and is rectangular, then return Matrix(expr)
if not isinstance(expr, list):
return expr
for row in expr:
if not isinstance(row, list):
return expr
rdim = len(expr[0])
for row in expr:
if not len(row) == rdim:
return expr
return sympy.Matrix(expr)
if matrix:
sexpr = to_matrix(sexpr)
return sexpr
#-----------------------------------------------------------------------------
# class for symbolic mathematical formulas
class formula(object):
"""
Representation of a mathematical formula object. Accepts mathml math expression
for constructing, and can produce sympy translation. The formula may or may not
include an assignment (=).
"""
def __init__(self, expr, asciimath='', options=None):
self.expr = expr.strip()
self.asciimath = asciimath
self.the_cmathml = None
self.the_sympy = None
self.options = options
def is_presentation_mathml(self):
"""
Check if formula is in mathml presentation format.
"""
return '<mstyle' in self.expr
def is_mathml(self):
"""
Check if formula is in mathml format.
"""
return '<math ' in self.expr
def fix_greek_in_mathml(self, xml):
"""
Recursively fix greek letters in passed in xml.
"""
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
for k in xml:
tag = gettag(k)
if tag == 'mi' or tag == 'ci':
usym = unicode(k.text)
try:
udata = unicodedata.name(usym)
except Exception:
udata = None
# print "usym = %s, udata=%s" % (usym,udata)
if udata: # eg "GREEK SMALL LETTER BETA"
if 'GREEK' in udata:
usym = udata.split(' ')[-1]
if 'SMALL' in udata:
usym = usym.lower()
#print "greek: ",usym
k.text = usym
self.fix_greek_in_mathml(k)
return xml
def preprocess_pmathml(self, xml):
r"""
Pre-process presentation MathML from ASCIIMathML to make it more
acceptable for SnuggleTeX, and also to accomodate some sympy
conventions (eg hat(i) for \hat{i}).
This method would be a good spot to look for an integral and convert
it, if possible...
"""
if isinstance(xml, (str, unicode)):
xml = etree.fromstring(xml) # TODO: wrap in try
xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
def fix_pmathml(xml):
"""
f and g are processed as functions by asciimathml, eg "f-2" turns
into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is
really terrible for turning into cmathml. undo this here.
"""
for k in xml:
tag = gettag(k)
if tag == 'mrow':
if len(k) == 2:
if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':
idx = xml.index(k)
xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container
xml.insert(idx + 1, deepcopy(k[1]))
xml.remove(k)
fix_pmathml(k)
fix_pmathml(xml)
def fix_hat(xml):
"""
hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle
this into <mi>hat(f)</mi> hat i also somtimes turned into
<mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>
"""
for k in xml:
tag = gettag(k)
if tag == 'mover':
if len(k) == 2:
if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0].text
xml.replace(k, newk)
if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0][0].text
xml.replace(k, newk)
fix_hat(k)
fix_hat(xml)
def flatten_pmathml(xml):
"""
Give the text version of certain PMathML elements
Sometimes MathML will be given with each letter separated (it
doesn't know if its implicit multiplication or what). From an xml
node, find the (text only) variable name it represents. So it takes
<mrow>
<mi>m</mi>
<mi>a</mi>
<mi>x</mi>
</mrow>
and returns 'max', for easier use later on.
"""
tag = gettag(xml)
if tag == 'mn':
return xml.text
elif tag == 'mi':
return xml.text
elif tag == 'mrow':
return ''.join([flatten_pmathml(y) for y in xml])
raise Exception('[flatten_pmathml] unknown tag %s' % tag)
def fix_mathvariant(parent):
"""
Fix certain kinds of math variants
Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle>
with 'scriptN'. There have been problems using script_N or script(N)
"""
for child in parent:
if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script':
newchild = etree.Element('mi')
newchild.text = 'script%s' % flatten_pmathml(child[0])
parent.replace(child, newchild)
fix_mathvariant(child)
fix_mathvariant(xml)
# find "tagged" superscripts
# they have the character \u200b in the superscript
# replace them with a__b so snuggle doesn't get confused
def fix_superscripts(xml):
""" Look for and replace sup elements with 'X__Y' or 'X_Y__Z'
In the javascript, variables with '__X' in them had an invisible
character inserted into the sup (to distinguish from powers)
E.g. normal:
<msubsup>
<mi>a</mi>
<mi>b</mi>
<mi>c</mi>
</msubsup>
to be interpreted '(a_b)^c' (nothing done by this method)
And modified:
<msubsup>
<mi>b</mi>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>d</mi>
</mrow>
</msubsup>
to be interpreted 'a_b__c'
also:
<msup>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>B</mi>
</mrow>
</msup>
to be 'x__B'
"""
for k in xml:
tag = gettag(k)
# match things like the last example--
# the second item in msub is an mrow with the first
# character equal to \u200b
if (
tag == 'msup' and
len(k) == 2 and gettag(k[1]) == 'mrow' and
gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew
):
# replace the msup with 'X__Y'
k[1].remove(k[1][0])
newk = etree.Element('mi')
newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))
xml.replace(k, newk)
# match things like the middle example-
# the third item in msubsup is an mrow with the first
# character equal to \u200b
if (
tag == 'msubsup' and
len(k) == 3 and gettag(k[2]) == 'mrow' and
gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew
):
# replace the msubsup with 'X_Y__Z'
k[2].remove(k[2][0])
newk = etree.Element('mi')
newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))
xml.replace(k, newk)
fix_superscripts(k)
fix_superscripts(xml)
def fix_msubsup(parent):
"""
Snuggle returns an error when it sees an <msubsup> replace such
elements with an <msup>, except the first element is of
the form a_b. I.e. map a_b^c => (a_b)^c
"""
for child in parent:
# fix msubsup
if gettag(child) == 'msubsup' and len(child) == 3:
newchild = etree.Element('msup')
newbase = etree.Element('mi')
newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))
newexp = child[2]
newchild.append(newbase)
newchild.append(newexp)
parent.replace(child, newchild)
fix_msubsup(child)
fix_msubsup(xml)
self.xml = xml # pylint: disable=attribute-defined-outside-init
return self.xml
def get_content_mathml(self):
if self.the_cmathml:
return self.the_cmathml
# pre-process the presentation mathml before sending it to snuggletex to convert to content mathml
try:
xml = self.preprocess_pmathml(self.expr)
except Exception, err:
log.warning('Err %s while preprocessing; expr=%s', err, self.expr)
return "<html>Error! Cannot process pmathml</html>"
pmathml = etree.tostring(xml, pretty_print=True)
self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init
# convert to cmathml
self.the_cmathml = self.GetContentMathML(self.asciimath, pmathml)
return self.the_cmathml
cmathml = property(get_content_mathml, None, None, 'content MathML representation')
def make_sympy(self, xml=None):
"""
Return sympy expression for the math formula.
The math formula is converted to Content MathML then that is parsed.
This is a recursive function, called on every CMML node. Support for
more functions can be added by modifying opdict, abould halfway down
"""
if self.the_sympy:
return self.the_sympy
if xml is None: # root
if not self.is_mathml():
return my_sympify(self.expr)
if self.is_presentation_mathml():
cmml = None
try:
cmml = self.cmathml
xml = etree.fromstring(str(cmml))
except Exception, err:
if 'conversion from Presentation MathML to Content MathML was not successful' in cmml:
msg = "Illegal math expression"
else:
msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)
raise Exception(msg)
xml = self.fix_greek_in_mathml(xml)
self.the_sympy = self.make_sympy(xml[0])
else:
xml = etree.fromstring(self.expr)
xml = self.fix_greek_in_mathml(xml)
self.the_sympy = self.make_sympy(xml[0])
return self.the_sympy
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
# simple math
def op_divide(*args):
if not len(args) == 2:
raise Exception('divide given wrong number of arguments!')
# print "divide: arg0=%s, arg1=%s" % (args[0],args[1])
return sympy.Mul(args[0], sympy.Pow(args[1], -1))
def op_plus(*args):
return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]
def op_times(*args):
return reduce(operator.mul, args)
def op_minus(*args):
if len(args) == 1:
return -args[0]
if not len(args) == 2:
raise Exception('minus given wrong number of arguments!')
#return sympy.Add(args[0],-args[1])
return args[0] - args[1]
opdict = {
'plus': op_plus,
'divide': operator.div, # should this be op_divide?
'times': op_times,
'minus': op_minus,
'root': sympy.sqrt,
'power': sympy.Pow,
'sin': sympy.sin,
'cos': sympy.cos,
'tan': sympy.tan,
'cot': sympy.cot,
'sinh': sympy.sinh,
'cosh': sympy.cosh,
'coth': sympy.coth,
'tanh': sympy.tanh,
'asin': sympy.asin,
'acos': sympy.acos,
'atan': sympy.atan,
'atan2': sympy.atan2,
'acot': sympy.acot,
'asinh': sympy.asinh,
'acosh': sympy.acosh,
'atanh': sympy.atanh,
'acoth': sympy.acoth,
'exp': sympy.exp,
'log': sympy.log,
'ln': sympy.ln,
}
# simple symbols - TODO is this code used?
nums1dict = {
'pi': sympy.pi,
}
def parsePresentationMathMLSymbol(xml):
"""
Parse <msub>, <msup>, <mi>, and <mn>
"""
tag = gettag(xml)
if tag == 'mn':
return xml.text
elif tag == 'mi':
return xml.text
elif tag == 'msub':
return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])
elif tag == 'msup':
return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])
raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)
# parser tree for Content MathML
tag = gettag(xml)
# first do compound objects
if tag == 'apply': # apply operator
opstr = gettag(xml[0])
if opstr in opdict:
op = opdict[opstr] # pylint: disable=invalid-name
args = [self.make_sympy(expr) for expr in xml[1:]]
try:
res = op(*args)
except Exception, err:
self.args = args # pylint: disable=attribute-defined-outside-init
self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name
raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args))
return res
else:
raise Exception('[formula]: unknown operator tag %s' % (opstr))
elif tag == 'list': # square bracket list
if gettag(xml[0]) == 'matrix':
return self.make_sympy(xml[0])
else:
return [self.make_sympy(expr) for expr in xml]
elif tag == 'matrix':
return sympy.Matrix([self.make_sympy(expr) for expr in xml])
elif tag == 'vector':
return [self.make_sympy(expr) for expr in xml]
# atoms are below
elif tag == 'cn': # number
return sympy.sympify(xml.text)
# return float(xml.text)
elif tag == 'ci': # variable (symbol)
if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'): # subscript or superscript
usym = parsePresentationMathMLSymbol(xml[0])
sym = sympy.Symbol(str(usym))
else:
usym = unicode(xml.text)
if 'hat' in usym:
sym = my_sympify(usym)
else:
if usym == 'i' and self.options is not None and 'imaginary' in self.options: # i = sqrt(-1)
sym = sympy.I
else:
sym = sympy.Symbol(str(usym))
return sym
else: # unknown tag
raise Exception('[formula] unknown tag %s' % tag)
sympy = property(make_sympy, None, None, 'sympy representation')
def GetContentMathML(self, asciimath, mathml):
"""
Handle requests to snuggletex API to convert the Ascii math to MathML
"""
# url = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
# url = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
url = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
if 1:
payload = {
'asciiMathInput': asciimath,
'asciiMathML': mathml,
#'asciiMathML':unicode(mathml).encode('utf-8'),
}
headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"}
request = requests.post(url, data=payload, headers=headers, verify=False)
request.encoding = 'utf-8'
ret = request.text
# print "encoding: ", request.encoding
mode = 0
cmathml = []
for k in ret.split('\n'):
if 'conversion to Content MathML' in k:
mode = 1
continue
if mode == 1:
if '<h3>Maxima Input Form</h3>' in k:
mode = 0
continue
cmathml.append(k)
cmathml = '\n'.join(cmathml[2:])
cmathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">\n' + unescape(cmathml) + '\n</math>'
# print cmathml
return cmathml
#-----------------------------------------------------------------------------
def test1():
"""Test XML strings - addition"""
xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<plus/>
<cn>1</cn>
<cn>2</cn>
</apply>
</math>
"""
return formula(xmlstr)
def test2():
"""Test XML strings - addition, Greek alpha"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<plus/>
<cn>1</cn>
<apply>
<times/>
<cn>2</cn>
<ci>α</ci>
</apply>
</apply>
</math>
"""
return formula(xmlstr)
def test3():
"""Test XML strings - addition, Greek gamma"""
xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<divide/>
<cn>1</cn>
<apply>
<plus/>
<cn>2</cn>
<ci>γ</ci>
</apply>
</apply>
</math>
"""
return formula(xmlstr)
def test4():
"""Test XML strings - addition, Greek alpha, mfrac"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>1</mn>
<mo>+</mo>
<mfrac>
<mn>2</mn>
<mi>α</mi>
</mfrac>
</mstyle>
</math>
"""
return formula(xmlstr)
def test5():
"""Test XML strings - sum of two matrices"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>cos</mi>
<mrow>
<mo>(</mo>
<mi>θ</mi>
<mo>)</mo>
</mrow>
</mrow>
<mo>⋅</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
<mo>+</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
</mstyle>
</math>
"""
return formula(xmlstr)
def test6():
"""Test XML strings - imaginary numbers"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>1</mn>
<mo>+</mo>
<mi>i</mi>
</mstyle>
</math>
"""
return formula(xmlstr, options='imaginary')
| agpl-3.0 |
jroweboy/bzrflag | bzrflag/world.py | 19 | 5244 | # Bzrflag
# Copyright 2008-2011 Brigham Young University
#
# This file is part of Bzrflag.
#
# Bzrflag is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Bzrflag is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Bzrflag. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Bzrflag, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail [email protected].
"""BSFlag World Model
The BSFlag World module implements a parser for reading in bzw files and
creating Python objects for all of the static components of a BZFlag world
(such as bases and obstacles). It doesn't implement everything because BSFlag
only worries about a subset of BZFlag features anyway. However, everything
that is supported is implemented correctly. See the bzw man page for more
information about the file format (but note that their BNF is incomplete).
"""
__author__ = "BYU AML Lab <[email protected]>"
__copyright__ = "Copyright 2008-2011 Brigham Young University"
__license__ = "GNU GPL"
import logging
import math
from pyparsing import nums, Word, Keyword, LineEnd, Each, ZeroOrMore, \
Combine, Optional, Dict, SkipTo, Group
import constants
logger = logging.getLogger('world')
def numeric(toks):
n = toks[0]
try:
return int(n)
except ValueError:
return float(n)
integer = Word(nums).setParseAction(numeric)
floatnum = Combine(Optional('-') + ('0' | Word('123456789',nums)) +
Optional('.' + Word(nums)) +
Optional(Word('eE',exact=1) + Word(nums+'+-',nums)))
floatnum.setParseAction(numeric)
end = Keyword('end').suppress()
point2d = floatnum + floatnum
# Note: Since we're just doing 2D, we ignore the z term of 3D points.
point3d = floatnum + floatnum + floatnum.suppress()
# Obstacle
position = Group((Keyword('pos') | Keyword('position')) + point3d)
size = Group(Keyword('size') + point3d)
rotation = Group((Keyword('rot') | Keyword('rotation')) + floatnum)
obstacle_items = [position, Optional(size), Optional(rotation)]
class Box(object):
"""A basic obstacle type."""
def __init__(self, pos=None, position=None, rot=None, rotation=None,
size=None):
self.pos = pos or position
self.rot = rot or rotation
if self.rot:
self.rot *= 2 * math.pi / 360
self.size = size
if not self.pos:
raise ValueError('Position is required')
@classmethod
def parser(cls):
box_contents = Each(obstacle_items)
box = Dict(Keyword('box').suppress() + box_contents + end)
box.setParseAction(lambda toks: cls(**dict(toks)))
return box
class Base(object):
"""A BZFlag Base. One per team."""
def __init__(self, color=None, pos=None, position=None, rot=None,
rotation=None, size=None):
self.color = constants.COLORNAME[color]
self.pos = pos or position
self.rot = rot or rotation
if self.rot:
self.rot *= 2 * math.pi / 360
self.size = size
if self.color is None:
raise ValueError('Color is required')
if not self.pos:
raise ValueError('Position is required')
@classmethod
def parser(cls):
color = Group(Keyword('color') + integer)
base_contents = Each([color] + obstacle_items)
base = Dict(Keyword('base').suppress() + base_contents + end)
base.setParseAction(lambda toks: cls(**dict(toks)))
return base
class World(object):
"""Encompassing class which parses the entire file. Returns a World
object that is used by the classes in :mod:`game` to populate the
game.
"""
def __init__(self, WIDTH, HEIGHT, items=None):
self.size = (WIDTH, HEIGHT)
self.width = WIDTH
self.height = HEIGHT
self.boxes = []
self.bases = []
if items:
for item in items:
if isinstance(item, Box):
self.boxes.append(item)
elif isinstance(item, Base):
self.bases.append(item)
else:
raise NotImplementedError('Unhandled world element.')
@classmethod
def parser(cls, width, height):
"""Parse a BZW file.
For now, we're only supporting a subset of BZW's allobjects.
"""
comment = '#' + SkipTo(LineEnd())
bzw = ZeroOrMore(Box.parser() | Base.parser()).ignore(comment)
bzw.setParseAction(lambda toks: cls(width, height, toks))
return bzw
if __name__ == '__main__':
f = open('maps/four_ls.bzw')
parser = World.parser()
w = parser.parseString(f.read())
print w
# vim: et sw=4 sts=4
| gpl-3.0 |
drglove/SickRage | sickbeard/notifiers/freemobile.py | 5 | 4897 | # Author: Marvin Pinto <[email protected]>
# Author: Dennis Lutter <[email protected]>
# Author: Aaron Bieber <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import httplib
import urllib, urllib2
import time
import sickbeard
from sickbeard import logger
from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD, NOTIFY_SUBTITLE_DOWNLOAD, NOTIFY_GIT_UPDATE, NOTIFY_GIT_UPDATE_TEXT
from sickbeard.exceptions import ex
class FreeMobileNotifier:
def test_notify(self, id=None, apiKey=None):
return self._notifyFreeMobile('Test', "This is a test notification from SickRage", id, apiKey, force=True)
def _sendFreeMobileSMS(self, title, msg, id=None, apiKey=None):
"""
Sends a SMS notification
msg: The message to send (unicode)
title: The title of the message
userKey: The pushover user id to send the message to (or to subscribe with)
returns: True if the message succeeded, False otherwise
"""
if id == None:
id = sickbeard.FREEMOBILE_ID
if apiKey == None:
apiKey = sickbeard.FREEMOBILE_APIKEY
logger.log("Free Mobile in use with API KEY: " + apiKey, logger.DEBUG)
# build up the URL and parameters
msg = msg.strip()
msg_quoted = urllib2.quote(title + ": " + msg)
URL = "https://smsapi.free-mobile.fr/sendmsg?user=" + id + "&pass=" + apiKey + "&msg=" + msg_quoted
req = urllib2.Request(URL)
# send the request to Free Mobile
try:
reponse = urllib2.urlopen(req)
except IOError, e:
if hasattr(e,'code'):
if e.code == 400:
message = "Missing parameter(s)."
logger.log(message, logger.ERROR)
return False, message
if e.code == 402:
message = "Too much SMS sent in a short time."
logger.log(message, logger.ERROR)
return False, message
if e.code == 403:
message = "API service isn't enabled in your account or ID / API key is incorrect."
logger.log(message, logger.ERROR)
return False, message
if e.code == 500:
message = "Server error. Please retry in few moment."
logger.log(message, logger.ERROR)
return False, message
message = "Free Mobile SMS successful."
logger.log(message, logger.INFO)
return True, message
def notify_snatch(self, ep_name, title=notifyStrings[NOTIFY_SNATCH]):
if sickbeard.FREEMOBILE_NOTIFY_ONSNATCH:
self._notifyFreeMobile(title, ep_name)
def notify_download(self, ep_name, title=notifyStrings[NOTIFY_DOWNLOAD]):
if sickbeard.FREEMOBILE_NOTIFY_ONDOWNLOAD:
self._notifyFreeMobile(title, ep_name)
def notify_subtitle_download(self, ep_name, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
if sickbeard.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notifyFreeMobile(title, ep_name + ": " + lang)
def notify_git_update(self, new_version = "??"):
if sickbeard.USE_FREEMOBILE:
update_text=notifyStrings[NOTIFY_GIT_UPDATE_TEXT]
title=notifyStrings[NOTIFY_GIT_UPDATE]
self._notifyFreeMobile(title, update_text + new_version)
def _notifyFreeMobile(self, title, message, id=None, apiKey=None, force=False):
"""
Sends a SMS notification
title: The title of the notification to send
message: The message string to send
id: Your Free Mobile customer ID
apikey: Your Free Mobile API key
force: Enforce sending, for instance for testing
"""
if not sickbeard.USE_FREEMOBILE and not force:
logger.log("Notification for Free Mobile not enabled, skipping this notification", logger.DEBUG)
return False
logger.log("Sending a SMS for " + message, logger.DEBUG)
return self._sendFreeMobileSMS(title, message, id, apiKey)
notifier = FreeMobileNotifier
| gpl-3.0 |
olsonse/linuxcnc | src/emc/usr_intf/pncconf/tests.py | 5 | 79931 | #!/usr/bin/env python2.4
# -*- encoding: utf-8 -*-
# This is pncconf, a graphical configuration editor for LinuxCNC
# Chris Morley copyright 2009
# This is based from stepconf, a graphical configuration editor for linuxcnc
# Copyright 2007 Jeff Epler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USAimport os
import os
import gtk
import time
class TESTS:
def __init__(self,app):
    """Capture references into the parent pncconf application.

    Keeps shortcuts to the collected config data (self.d), the GTK
    widget table (self.w) and the app itself (self.a), and publishes
    several module-level globals used by the test helpers below.
    """
    # access to:
    self.d = app.d  # collected data
    global SIG
    SIG = app._p  # private data (signals names)
    # NOTE(review): SIG and _PD are both bound to app._p here; SIG may
    # have been intended to reference a different member — confirm.
    global _PD
    _PD = app._p  # private data
    self.a = app  # The parent, pncconf
    self.w = app.widgets
    global debug
    debug = self.a.debugstate
    global dbg
    dbg = self.a.dbg
def parporttest(self,w):
    """Interactively test the configured parallel port(s).

    Builds a temporary halrun session: loads the parport driver(s),
    launches one pyvcp test panel per port (LEDs mirror input pins,
    buttons/checkbuttons drive output pins through or2 latches), dumps
    the pin/signal assignments to the help page, then blocks until the
    first panel is closed.
    """
    # Real-time environment must be available before loading HAL comps.
    if not self.a.check_for_rt():
        return
    panelname = os.path.join(_PD.DISTDIR, "configurable_options/pyvcp")
    halrun = os.popen("cd %(panelname)s\nhalrun -Is > /dev/null "% {'panelname':panelname,}, "w" )
    if debug:
        halrun.write("echo\n")
    halrun.write("loadrt threads period1=100000 name1=base-thread fp1=0 period2=%d name2=servo-thread\n"% self.d.servoperiod)
    # Driver load / read / write command lists built by the parent app.
    load,read,write = self.a.pport_command_string()
    for i in load:
        halrun.write('%s\n'%i)
    # One pyvcp test panel per port, staggered 300px apart horizontally.
    # NOTE(review): the 'panel' value embeds a "\n" and the format string
    # adds another, producing a blank line in the script — harmless but
    # probably unintended.
    for i in range(0,self.d.number_pports ):
        halrun.write("loadusr -Wn parport%(number)dtest pyvcp -g +%(pos)d+0 -c parport%(number)dtest %(panel)s\n"
            % {'pos':(i*300),'number':i,'panel':"parportpanel.xml\n",})
    halrun.write("loadrt or2 count=%d\n"%(self.d.number_pports * 12))
    for i in read:
        halrun.write('%s\n'%i)
    for i in range(0,(self.d.number_pports * 12)):
        halrun.write("addf or2.%d base-thread\n"% i)
    halrun.write("loadusr halmeter pin parport.0.pin-01-out -g 0 500\n")
    for i in write:
        halrun.write('%s\n'%i)
    # print out signals to help page:
    signaltext=''
    portname = 'pp1'
    for pin in (2,3,4,5,6,7,8,9,10,11,12,13,15):
        pinv = '%s_Ipin%d_inv' % (portname, pin)
        signaltree = self.d._gpioisignaltree
        signaltocheck =_PD.hal_input_names
        p, signal, invert = self.a.pport_push_data(portname,'Ipin',pin,pinv,signaltree,signaltocheck)
        signaltext += '%s %s %s\n'%(p,signal,invert)
    # check output pins
    for pin in (1,2,3,4,5,6,7,8,9,14,16,17):
        pinv = '%s_Opin%d_inv' % (portname, pin)
        signaltree = self.d._gpioosignaltree
        signaltocheck = _PD.hal_output_names
        p, signal, invert = self.a.pport_push_data(portname,'Opin',pin,pinv,signaltree,signaltocheck)
        signaltext += '%s %s %s\n'%(p,signal,invert)
    textbuffer = self.w.textoutput.get_buffer()
    # Show the assignment dump in the help notebook; if the GUI pieces
    # are missing, fall back to a warning dialog.
    # NOTE(review): bare except hides all errors here — consider
    # narrowing to the expected GTK exceptions.
    try :
        textbuffer.set_text(signaltext)
        self.w.helpnotebook.set_current_page(2)
        self.w.help_window.show_all()
        while gtk.events_pending():
            gtk.main_iteration()
    except:
        text = _("Pin names are unavailable\n")
        self.a.warning_dialog(text,True)
    templist = ("pp1","pp2","pp3")
    for j in range(self.d.number_pports):
        if self.d[templist[j]+"_direction"] == 1:
            # Port in "out" mode: pins 2-9 are outputs, so disable the
            # corresponding input-LED widgets on the test panel.
            inputpins = (10,11,12,13,15)
            outputpins = (1,2,3,4,5,6,7,8,9,14,16,17)
            for x in (2,3,4,5,6,7,8,9):
                halrun.write( "setp parport%dtest.led.%d.disable true\n"%(j, x))
                halrun.write( "setp parport%dtest.led_text.%d.disable true\n"%(j, x))
        else:
            # Port in "in" mode: pins 2-9 are inputs, disable the
            # output-button widgets instead.
            inputpins = (2,3,4,5,6,7,8,9,10,11,12,13,15)
            outputpins = (1,14,16,17)
            for x in (2,3,4,5,6,7,8,9):
                halrun.write( "setp parport%dtest.button.%d.disable true\n"% (j , x))
                halrun.write( "setp parport%dtest.button_text.%d.disable true\n"% (j , x))
        # Wire each input pin (honoring inversion) to its panel LED.
        for x in inputpins:
            i = self.w["%s_Ipin%d_inv" % (templist[j], x)].get_active()
            if i: halrun.write( "net red_in_not.%d parport%dtest.led.%d <= parport.%d.pin-%02d-in-not\n" % (x, j, x, j, x))
            else: halrun.write( "net red_in.%d parport%dtest.led.%d <= parport.%d.pin-%02d-in\n" % (x, j, x, j ,x))
        # Drive each output pin from an or2 of a momentary button and a
        # latching checkbutton on the panel.
        for num, x in enumerate(outputpins):
            i = self.w["%s_Opin%d_inv" % (templist[j], x)].get_active()
            if i: halrun.write( "setp parport.%d.pin-%02d-out-invert true\n" %(j, x))
            halrun.write("net signal_out%d or2.%d.out parport.%d.pin-%02d-out\n"% (x, num, j, x))
            halrun.write("net pushbutton.%d or2.%d.in1 parport%dtest.button.%d\n"% (x, num, j, x))
            halrun.write("net latchbutton.%d or2.%d.in0 parport%dtest.checkbutton.%d\n"% (x, num, j, x))
    halrun.write("start\n")
    # Block until the first test panel is closed by the user.
    halrun.write("waitusr parport0test\n"); halrun.flush()
    halrun.close()
    self.w['window1'].set_sensitive(1)
# This is for pyvcp test panel
def testpanel(self,w):
    """Launch the selected pyvcp sample panel in a scratch halrun session.

    The panel file, its directory and the window geometry are chosen
    from the pyvcp page's radio/check widgets; blocks until the panel
    is closed.  Returns True (without launching) when "blank" is chosen.
    """
    geometry_pos = "+0+0"
    geometry_size = ""
    panel_dir = os.path.join(_PD.DISTDIR, "configurable_options/pyvcp")
    # "Blank" means no panel at all — nothing to preview.
    if self.w.pyvcpblank.get_active():
        return True
    if self.w.pyvcp1.get_active():
        panel = "spindle.xml"
    if self.w.pyvcp2.get_active():
        panel = "xyzjog.xml"
    if self.w.pyvcpexist.get_active():
        # User-supplied panel lives in the machine's own config folder.
        panel = "pyvcp-panel.xml"
        panel_dir = os.path.expanduser("~/linuxcnc/configs/%s" % self.d.machinename)
    if self.w.pyvcpposition.get_active():
        geometry_pos = "+%d+%d"% (self.w.pyvcpxpos.get_value(),
                                  self.w.pyvcpypos.get_value())
    if self.w.pyvcpsize.get_active():
        geometry_size = "%dx%d"% (self.w.pyvcpwidth.get_value(),
                                  self.w.pyvcpheight.get_value())
    halrun = os.popen("cd %(panelname)s\nhalrun -Is > /dev/null"% {'panelname':panel_dir,}, "w" )
    if debug:
        halrun.write("echo\n")
    halrun.write("loadusr -Wn displaytest pyvcp -g %(size)s%(pos)s -c displaytest %(panel)s\n"
                 %{'size':geometry_size,'pos':geometry_pos,'panel':panel,})
    # Give the spindle demo panel a non-zero speed so the bar moves.
    if self.w.pyvcp1.get_active():
        halrun.write("setp displaytest.spindle-speed 1000\n")
    halrun.write("waitusr displaytest\n")
    halrun.flush()
    halrun.close()
def display_gladevcp_panel(self):
    """Preview the GladeVCP panel described by the current options.

    Regenerates /tmp/gvcp-panel.ui via gladevcptestpanel(), then runs it
    under a scratch halrun session with the chosen geometry and theme,
    poking demo values into the HAL pins so the widgets show activity.
    """
    pos = "+0+0"
    size = "200x200"
    options = ""
    folder = "/tmp"
    # If the user claims an existing panel file, verify it is actually
    # present in the machine-named config folder before continuing.
    if not self.w.createconfig.get_active() and self.w.gladeexists.get_active():
        folder = os.path.expanduser("~/linuxcnc/configs/%s" % self.d.machinename)
        if not os.path.exists(folder + "/gvcp-panel.ui"):
            self.a.warning_dialog (_("""You specified there is an existing gladefile, \
But there is not one in the machine-named folder.."""),True)
            return
    # (Re)write the generated preview panel into /tmp.
    self.gladevcptestpanel(self)
    if self.w.gladevcpposition.get_active() == True:
        xpos = self.w.gladevcpxpos.get_value()
        ypos = self.w.gladevcpypos.get_value()
        pos = "+%d+%d"% (xpos,ypos)
    if self.w.gladevcpsize.get_active() == True:
        width = self.w.gladevcpwidth.get_value()
        height = self.w.gladevcpheight.get_value()
        size = "%dx%d"% (width,height)
    # Non-default GTK theme is passed through with gladevcp's -t flag.
    if not self.w.gladevcptheme.get_active_text() == "Follow System Theme":
        options ="-t %s"% (self.w.gladevcptheme.get_active_text())
    print options
    halrun = os.popen("cd %s\nhalrun -Is > /dev/null"%(folder), "w" )
    if debug:
        halrun.write("echo\n")
    halrun.write("loadusr -Wn displaytest gladevcp -g %(size)s%(pos)s -c displaytest %(option)s gvcp-panel.ui\n" %{
                    'size':size,'pos':pos,'option':options})
    # Drive demo values into whichever widgets were enabled.
    if self.w.spindlespeedbar.get_active():
        halrun.write("setp displaytest.spindle-speed 500\n")
    if self.w.zerox.get_active():
        halrun.write("setp displaytest.zero-x-active true\n")
    if self.w.zeroy.get_active():
        halrun.write("setp displaytest.zero-y-active true\n")
    if self.w.zeroz.get_active():
        halrun.write("setp displaytest.zero-z-active true\n")
    if self.w.zeroa.get_active():
        halrun.write("setp displaytest.zero-a-active true\n")
    if self.w.autotouchz.get_active():
        halrun.write("setp displaytest.auto-touch-z-active true\n")
    if self.w.spindleatspeed.get_active():
        halrun.write("setp displaytest.spindle-at-speed-led true\n")
    halrun.write("setp displaytest.button-box-active true\n")
    # Block until the preview window is closed.
    halrun.write("waitusr displaytest\n")
    halrun.flush()
    halrun.close()
def gladevcptestpanel(self,w):
    """Write /tmp/gvcp-panel.ui, a GladeVCP panel matching the options.

    Each checkbox on the GladeVCP page (spindle speed bar, at-speed LED,
    per-axis zero buttons, Z auto touch-off) contributes its widget XML;
    display_gladevcp_panel() then launches the generated file.
    """
    directory = "/tmp/"
    filename = os.path.join(directory, "gvcp-panel.ui")
    file = open(filename, "w")
    # Fixed document header and outer window/VBox containers.
    print >>file, ("""<?xml version="1.0"?>
<interface>
<!-- interface-requires gladevcp 0.0 -->
<requires lib="gtk+" version="2.16"/>
<!-- interface-naming-policy project-wide -->
<object class="GtkWindow" id="window1">
<property name="width_request">100</property>
<child>
<object class="GtkVBox" id="vbox1">
<property name="visible">True</property>""")
    # Spindle speed bar, scaled to the configured max display speed.
    if self.w.spindlespeedbar.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBar" id="spindle-speed">
<property name="visible">True</property>
<property name="force_height">36</property>""")
        print >>file, ("""<property name="max">%(maxrpm)d</property>"""%{'maxrpm':self.w.maxspeeddisplay.get_value() })
        print >>file, ("""
<property name="z0_color">#0000ffff0000</property>
<property name="value">44.25</property>
<property name="z1_color">#ffffffff0000</property>
<property name="bg_color">#bebebebebebe</property>
<property name="text_template">Spindle: % 4d RPM</property>
<property name="z0_border">0.94999998807907104</property>
<property name="z2_color">#ffff00000000</property>
<property name="show_limits">False</property>
</object>
<packing>
<property name="expand">False</property>
<property name="position">0</property>
</packing>
</child>""" )
    # "Spindle up to speed" label + LED row.
    if self.w.spindleatspeed.get_active():
        print >>file, ("""
<child>
<object class="GtkHBox" id="hbox1">
<property name="visible">True</property>
<child>
<object class="GtkLabel" id="label1">
<property name="visible">True</property>
<property name="ypad">5</property>
<property name="label" translatable="yes"> Spindle Up To Speed </property>
</object>
<packing>
<property name="expand">False</property>
<property name="fill">False</property>
<property name="position">0</property>
</packing>
</child>
<child>
<object class="HAL_LED" id="spindle-at-speed-led">
<property name="visible">True</property>
<property name="led_shape">2</property>
<property name="on_color">green</property>
<property name="led_size">5</property>
</object>
<packing>
<property name="expand">False</property>
<property name="fill">False</property>
<property name="padding">10</property>
<property name="position">1</property>
</packing>
</child>
</object>
<packing>
<property name="expand">False</property>
<property name="position">1</property>
</packing>
</child>""")
    # Button table holding the optional zero / touch-off buttons.
    print >>file, ("""
<child>
<object class="HAL_Table" id="button-box-active">
<property name="visible">True</property>
<property name="n_rows">5</property>
<property name="homogeneous">False</property>""")
    if self.w.autotouchz.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBox" id="auto-touch-z-active">
<property name="visible">True</property>
<child>
<object class="HAL_Button" id="auto-touch-z">
<property name="label" translatable="yes">Z Auto Touch Off</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="yalign">0.56000000238418579</property>
</object>
<packing>
<property name="position">0</property>
</packing>
</child>
</object>
<packing>
<property name="top_attach">4</property>
<property name="bottom_attach">5</property>
</packing>
</child>""")
    if self.w.zeroa.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBox" id="zero-a-active">
<property name="visible">True</property>
<child>
<object class="HAL_Button" id="zero-a">
<property name="label" translatable="yes">Zero A</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
</object>
<packing>
<property name="position">0</property>
</packing>
</child>
</object>
<packing>
<property name="top_attach">3</property>
<property name="bottom_attach">4</property>
</packing>
</child>""")
    if self.w.zeroz.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBox" id="zero-z-active">
<property name="visible">True</property>
<child>
<object class="HAL_Button" id="zero-z">
<property name="label" translatable="yes">Zero Z</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
</object>
<packing>
<property name="position">0</property>
</packing>
</child>
</object>
<packing>
<property name="top_attach">2</property>
<property name="bottom_attach">3</property>
</packing>
</child>""")
    if self.w.zeroy.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBox" id="zero-y-active">
<property name="visible">True</property>
<child>
<object class="HAL_Button" id="zero-y">
<property name="label" translatable="yes">Zero Y</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
</object>
<packing>
<property name="position">0</property>
</packing>
</child>
</object>
<packing>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
</packing>
</child>""")
    if self.w.zerox.get_active():
        print >>file, ("""
<child>
<object class="HAL_HBox" id="zero-x-active">
<property name="visible">True</property>
<child>
<object class="HAL_Button" id="zero-x">
<property name="label" translatable="yes">Zero X</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
</object>
<packing>
<property name="position">0</property>
</packing>
</child>
</object>
</child>""")
    # Close the table, containers and document.
    print >>file, ("""
</object>
<packing>
<property name="expand">False</property>
<property name="fill">False</property>
<property name="position">2</property>
</packing>
</child>
</object>
</child>
</object>
</interface>""")
    file.close()
# for classicladder test
def load_ladder(self,w):
    """Run ClassicLadder with the chosen sample/custom ladder program.

    Loads classicladder_rt into a scratch halrun session sized from the
    GUI spin buttons, opens the selected .clp file for editing (with
    TEMP.clp as the save target), and records whether an edited program
    was produced so the next run reloads it.
    """
    # Edits are saved to TEMP.clp under the distribution's ladder dir.
    newfilename = os.path.join(_PD.DISTDIR, "configurable_options/ladder/TEMP.clp")
    self.d.modbus = self.w.modbus.get_active()
    halrun = os.popen("halrun -Is > /dev/null", "w")
    if debug:
        halrun.write("echo\n")
    # Size the realtime component from the GUI-selected counts.
    halrun.write("""
loadrt threads period1=%(period)d name1=base-thread fp1=0 period2=%(period2)d name2=servo-thread
loadrt classicladder_rt numPhysInputs=%(din)d numPhysOutputs=%(dout)d numS32in=%(sin)d\
numS32out=%(sout)d numFloatIn=%(fin)d numFloatOut=%(fout)d numBits=%(bmem)d numWords=%(wmem)d
addf classicladder.0.refresh servo-thread
start\n""" % {
        'din': self.w.digitsin.get_value(),
        'dout': self.w.digitsout.get_value(),
        'sin': self.w.s32in.get_value(),
        'sout': self.w.s32out.get_value(),
        'fin':self.w.floatsin.get_value(),
        'fout':self.w.floatsout.get_value(),
        'bmem':self.w.bitmem.get_value(),
        'wmem':self.w.wordmem.get_value(),
        'period':100000,
        'period2':self.d.servoperiod
    })
    # Pick the ladder program to open, based on the selected radio.
    if self.w.ladderexist.get_active() == True:
        if self.d.tempexists:
            self.d.laddername='TEMP.clp'
        else:
            self.d.laddername= 'blank.clp'
    if self.w.ladder1.get_active() == True:
        self.d.laddername= 'estop.clp'
    if self.w.ladder2.get_active() == True:
        self.d.laddername = 'serialmodbus.clp'
        self.d.modbus = True
        self.w.modbus.set_active(True)
    if self.w.laddertouchz.get_active() == True:
        self.d.laddertouchz = True
        self.d.laddername = 'touchoff_z.clp'
        self.d.halui = True
        self.w.halui.set_active(True)
    # NOTE(review): ladderexist is tested a second time here and
    # overrides the TEMP/blank choice above with custom.clp; one of the
    # two checks likely belongs to a different widget — confirm upstream.
    if self.w.ladderexist.get_active() == True:
        self.d.laddername='custom.clp'
        originalfile = filename = os.path.expanduser("~/linuxcnc/configs/%s/custom.clp" % self.d.machinename)
    else:
        filename = os.path.join(_PD.DISTDIR, "configurable_options/ladder/"+ self.d.laddername)
    # Launch the ClassicLadder editor (with modbus master if requested);
    # --newpath makes any user save go to TEMP.clp.
    if self.d.modbus == True:
        halrun.write("loadusr -w classicladder --modmaster --newpath=%(newname)s %(filename)s\n" % {
            'newname':newfilename,'filename':filename})
    else:
        halrun.write("loadusr -w classicladder --newpath=%(newname)s %(filename)s\n" % { 'newname':newfilename ,'filename':filename })
    halrun.write("start\n")
    halrun.flush()
    halrun.close()
    # If the user saved, remember the edited program for next time.
    if os.path.exists(newfilename):
        self.d.tempexists = True
        self.w.newladder.set_text('Edited ladder program')
        self.w.ladderexist.set_active(True)
    else:
        self.d.tempexists = 0
# servo and stepper test
def tune_axis(self, axis):
def get_value(d):
return self.a.get_value(d)
if not self.a.check_for_rt():
return
d = self.d
w = self.w
self.updaterunning = False
self.scale = self.enc_scale = 1000
axnum = "xyzas".index(axis)
self.axis_under_tune = axis
step_sig = self.a.stepgen_sig(axis)
self.stepgen = self.a.stepgen_sig(axis)
#print axis," stepgen--",self.stepgen
self.encoder = self.a.encoder_sig(axis)
#print axis," encoder--",self.encoder
pwm_sig = self.a.pwmgen_sig(axis)
self.pwm = self.d.make_pinname(pwm_sig)
#print axis," pwgen--",self.pwmgen
pump = self.d.findsignal("charge-pump")
if self.stepgen:
state = True
w.xtuningnotebook.set_current_page(1)
else:
state = False
w.xtuningnotebook.set_current_page(0)
text = _("Servo tuning is not tested in PNCconf yet\n")
self.a.warning_dialog(text,True)
#w.xtuneinvertencoder.set_sensitive(not state)
if self.stepgen:
w.xpidtable.set_sensitive(self.d.advanced_option)
w.xstep.set_sensitive(state)
w.xsteptable.set_sensitive(state)
distance = 2
if axis == "a":
w,xtunedistunits.set_text(_("degrees"))
w.xtunevelunits.set_text(_("degrees / minute"))
w.xtuneaccunits.set_text(_("degrees / second²"))
distance = 360
elif axis == "s":
w.xtunedistunits.set_text(_("revolutions"))
w.xtunevelunits.set_text(_("rpm"))
w.xtuneaccunits.set_text(_("revs / second²"))
distance = 100
elif d.units == _PD._METRIC:
w.xtunedistunits.set_text(_("mm"))
w.xtunevelunits.set_text(_("mm / minute"))
w.xtuneaccunits.set_text(_("mm / second²"))
distance = 50
else:
w.xtunedistunits.set_text(_("inches"))
w.xtunevelunits.set_text(_("inches / minute"))
w.xtuneaccunits.set_text(_("inches / second²"))
w.xtuneamplitude.set_value(distance)
w.xtunepause.set_value(1)
w.xtunevel.set_value(get_value(w[axis+"maxvel"]))
w.xtuneacc.set_value(get_value(w[axis+"maxacc"]))
w.xtunecurrentP.set_value(w[axis+"P"].get_value())
w.xtuneorigP.set_text("%s" % w[axis+"P"].get_value())
w.xtunecurrentI.set_value(w[axis+"I"].get_value())
w.xtuneorigI.set_text("%s" % w[axis+"I"].get_value())
w.xtunecurrentD.set_value(w[axis+"D"].get_value())
w.xtuneorigD.set_text("%s" % w[axis+"D"].get_value())
w.xtunecurrentFF0.set_value(w[axis+"FF0"].get_value())
w.xtuneorigFF0.set_text("%s" % w[axis+"FF0"].get_value())
w.xtunecurrentFF1.set_value(w[axis+"FF1"].get_value())
w.xtuneorigFF1.set_text("%s" % w[axis+"FF1"].get_value())
w.xtunecurrentFF2.set_value(w[axis+"FF2"].get_value())
w.xtuneorigFF2.set_text("%s" % w[axis+"FF2"].get_value())
w.xtunecurrentbias.set_value(w[axis+"bias"].get_value())
w.xtuneorigbias.set_text("%s" % w[axis+"bias"].get_value())
w.xtunecurrentdeadband.set_value(w[axis+"deadband"].get_value())
w.xtuneorigdeadband.set_text("%s" % w[axis+"deadband"].get_value())
w.xtunecurrentsteptime.set_value(w[axis+"steptime"].get_value())
w.xtuneorigsteptime.set_text("%s" % w[axis+"steptime"].get_value())
w.xtunecurrentstepspace.set_value(get_value(w[axis+"stepspace"]))
w.xtuneorigstepspace.set_text("%s" % w[axis+"stepspace"].get_value())
w.xtunecurrentdirhold.set_value(get_value(w[axis+"dirhold"]))
w.xtuneorigdirhold.set_text("%s" % w[axis+"dirhold"].get_value())
w.xtunecurrentdirsetup.set_value(get_value(w[axis+"dirsetup"]))
w.xtuneorigdirsetup.set_text("%s" % w[axis+"dirsetup"].get_value())
self.tunejogplus = self.tunejogminus = 0
w.xtunedir.set_active(0)
w.xtunerun.set_active(0)
#w.xtuneinvertmotor.set_active(w[axis+"invertmotor"].get_active())
#w.xtuneinvertencoder.set_active(w[axis+"invertencoder"].get_active())
dac_scale = get_value(w[axis+"outputscale"])
if axis == "s":
pwmmaxlimit = get_value(w.soutputscale)
max_voltage_factor = 10.0/get_value(w.soutputmaxvoltage) # voltagelimit
pwmmaxoutput = pwmmaxlimit * max_voltage_factor
if w.susenegativevoltage.get_active():
pwmminlimit = -pwmmaxlimit
else:
pwmminlimit = 0
else:
pwmminlimit = get_value(w[axis+"outputminlimit"])
pwmmaxlimit = get_value(w[axis+"outputmaxlimit"])
pwmmaxoutput = get_value(w[axis+"outputscale"])
self.halrun = halrun = os.popen("halrun -Is > /dev/null", "w")
if debug:
halrun.write("echo\n")
halrun.write(" loadrt threads fp1=1 period1=%d name1=servo-thread\n"%
(self.d.servoperiod ))
halrun.write(" loadusr halscope\n")
halrun.write(" loadrt scale names=scale_to_rpm\n")
halrun.write(" loadrt axistest\n")
halrun.write(" loadrt simple_tp \n")
halrun.write(" net target-cmd <= axistest.0.position-cmd\n")
halrun.write(" net pos-fb => axistest.0.position-fb\n")
halrun.write(" net enable => simple-tp.0.enable\n")
halrun.write(" net target-cmd => simple-tp.0.target-pos\n")
halrun.write(" net pos-cmd <= simple-tp.0.current-pos\n")
halrun.write(" net vel-cmd <= simple-tp.0.current-vel\n")
halrun.write(" loadrt pid num_chan=1\n")
# search and record the commands for I/O cards
load,read,write = self.a.hostmot2_command_string()
# do I/O load commands
for i in load:
halrun.write('%s\n'%i)
# do I/O read commands
for i in read:
halrun.write('%s\n'%i)
if pump:
halrun.write( "loadrt charge_pump\n")
halrun.write( "net enable charge-pump.enable\n")
halrun.write( "net charge-pump <= charge-pump.out\n")
halrun.write( "addf charge-pump servo-thread\n")
halrun.write("addf axistest.0.update servo-thread \n")
halrun.write("addf simple-tp.0.update servo-thread \n")
halrun.write("addf pid.0.do-pid-calcs servo-thread \n")
halrun.write("addf scale_to_rpm servo-thread \n")
# do I/O write comands
for i in write:
halrun.write('%s\n'%i)
halrun.write( "newsig estop-out bit\n")
halrun.write( "sets estop-out false\n")
halrun.write("setp pid.0.Pgain %d\n"% ( w[axis+"P"].get_value() ))
halrun.write("setp pid.0.Igain %d\n"% ( w[axis+"I"].get_value() ))
halrun.write("setp pid.0.Dgain %d\n"% ( w[axis+"D"].get_value() ))
halrun.write("setp pid.0.bias %d\n"% ( w[axis+"bias"].get_value() ))
halrun.write("setp pid.0.FF0 %d\n"% ( w[axis+"FF0"].get_value() ))
halrun.write("setp pid.0.FF1 %d\n"% ( w[axis+"FF1"].get_value() ))
halrun.write("setp pid.0.FF2 %d\n"% ( w[axis+"FF2"].get_value() ))
halrun.write("setp pid.0.deadband %d\n"% ( w[axis+"deadband"].get_value() ))
halrun.write("setp pid.0.maxoutput %d\n"% ( w[axis+"maxoutput"].get_value() ))
halrun.write("setp pid.0.error-previous-target true\n")
if self.stepgen:
halrun.write("setp pid.0.maxerror .0005\n")
halrun.write("net enable => pid.0.enable\n")
halrun.write("net output <= pid.0.output\n")
halrun.write("net pos-cmd => pid.0.command\n")
halrun.write("net vel-cmd => pid.0.command-deriv\n")
halrun.write("net pos-fb => pid.0.feedback\n")
# search and connect I/o signals needed to enable amps etc
self.hal_test_signals(axis)
# for encoder signals
if self.encoder:
#print self.encoder,"--",self.encoder[4:5],self.encoder[10:],self.encoder[6:7]
self.enc_signalname = self.d.make_pinname(self.encoder)
if w[axis+"invertmotor"].get_active():
self.enc_scale = get_value(w[axis + "encoderscale"]) * -1
else:
self.enc_scale = get_value(w[axis + "encoderscale"])
halrun.write("setp %s.counter-mode %s\n"% (self.enc_signalname, w.ssingleinputencoder.get_active()))
halrun.write("setp %s.filter 1\n"% (self.enc_signalname))
halrun.write("setp %s.index-invert 0\n"% (self.enc_signalname))
halrun.write("setp %s.index-mask 0\n"% (self.enc_signalname))
halrun.write("setp %s.index-mask-invert 0\n"% (self.enc_signalname))
halrun.write("setp %s.scale %d\n"% (self.enc_signalname, self.enc_scale))
halrun.write("loadusr halmeter -s pin %s.velocity -g 0 625 330\n"% (self.enc_signalname))
halrun.write("loadusr halmeter -s pin %s.position -g 0 675 330\n"% (self.enc_signalname))
halrun.write("loadusr halmeter pin %s.velocity -g 275 415\n"% (self.enc_signalname))
halrun.write("net pos-fb <= %s.position \n"% (self.enc_signalname))
# setup pwm generator
if self.pwm:
print self.pwm
if "pwm" in self.pwm: # mainboard PWM
pwmtype = self.d[pwm_sig+"type"]
if pwmtype == _PD.PWMP: pulsetype = 1
elif pwmtype == _PD.PDMP: pulsetype = 3
elif pwmtype == _PD.UDMU: pulsetype = 2
else:
print "**** ERROR PNCCONF- PWM type not recognized in tune test"
return
halrun.write("setp %s %d \n"% (self.pwm +".output-type", pulsetype))
halrun.write("net enable %s \n"% (self.pwm +".enable"))
halrun.write("setp %s \n"% (self.pwm +".scale %f"% dac_scale))
ending = ".value"
pwminvertlist = self.a.pwmgen_invert_pins(pwm_sig)
for i in pwminvertlist:
halrun.write("setp "+i+".invert_output true\n")
else: # sserial PWM
pwm_enable = self.d.make_pinname(pwm_sig,False,True) # get prefix only
halrun.write("net enable %s \n"% (pwm_enable +"analogena"))
halrun.write("setp "+self.pwm+"-minlim %.1f\n"% pwmminlimit)
halrun.write("setp "+self.pwm+"-maxlim %.1f\n"% pwmmaxlimit)
halrun.write("setp "+self.pwm+"-scalemax %.1f\n"% pwmmaxoutput)
ending = ""
halrun.write("net output %s \n"% (self.pwm + ending))
halrun.write("loadusr halmeter -s pin %s -g 0 575 330\n"% (self.pwm + ending))
halrun.write("loadusr halmeter pin %s -g 0 550 375\n"% (self.pwm + ending) )
halrun.write("loadusr halmeter -s sig enable -g 0 525 330\n")
# for step gen components
if self.stepgen:
# check current component number to signal's component number
self.step_signalname = self.d.make_pinname(self.stepgen)
#print "step_signal--",self.step_signalname
if w[axis+"invertmotor"].get_active():
self.scale = get_value(w[axis + "stepscale"]) * -1
else:
self.scale = get_value(w[axis + "stepscale"]) * 1
stepinvertlist = self.a.stepgen_invert_pins(step_sig)
for i in stepinvertlist:
halrun.write("setp "+i+".invert_output true\n")
halrun.write("setp %s.step_type 0 \n"% (self.step_signalname))
halrun.write("setp %s.control-type 1 \n"% (self.step_signalname))
halrun.write("setp %s.position-scale %f \n"% (self.step_signalname,self.scale))
halrun.write("setp %s.steplen %d \n"% (self.step_signalname,w[axis+"steptime"].get_value()))
halrun.write("setp %s.stepspace %d \n"% (self.step_signalname,w[axis+"stepspace"].get_value()))
halrun.write("setp %s.dirhold %d \n"% (self.step_signalname,w[axis+"dirhold"].get_value()))
halrun.write("setp %s.dirsetup %d \n"% (self.step_signalname,w[axis+"dirsetup"].get_value()))
halrun.write("setp axistest.0.epsilon %f\n"% abs(1. / get_value(w[axis + "stepscale"])) )
halrun.write("setp %s.maxaccel %f \n"% (self.step_signalname,get_value(w[axis+"maxacc"])*1.25))
halrun.write("setp %s.maxvel %f \n"% (self.step_signalname,get_value(w[axis+"maxvel"])*1.25))
halrun.write("net enable => %s.enable\n"% (self.step_signalname))
halrun.write("net output => %s.velocity-cmd \n"% (self.step_signalname))
halrun.write("net pos-fb <= %s.position-fb \n"% (self.step_signalname))
halrun.write("net speed_rps scale_to_rpm.in <= %s.velocity-fb \n"% (self.step_signalname))
halrun.write("net speed_rpm scale_to_rpm.out\n")
halrun.write("setp scale_to_rpm.gain 60\n")
halrun.write("loadusr halmeter sig speed_rpm -g 0 415\n")
halrun.write("loadusr halmeter -s pin %s.velocity-fb -g 0 575 350\n"% (self.step_signalname))
halrun.write("loadusr halmeter -s pin %s.position-fb -g 0 525 350\n"% (self.step_signalname))
self.w.xtuneenable.set_active(False)
# self.w.xtuneinvertmotor.set_sensitive(False)
self.w.xtuneamplitude.set_sensitive(False)
self.w.xtunedir.set_sensitive(False)
self.w.xtunejogminus.set_sensitive(False)
self.w.xtunejogplus.set_sensitive(False)
self.updaterunning = True
halrun.write("start\n")
halrun.flush()
w.tunedialog.set_title(_("%s Axis Tune") % axis.upper())
w.tunedialog.move(550,0)
w.tunedialog.show_all()
self.w['window1'].set_sensitive(0)
result = w.tunedialog.run()
w.tunedialog.hide()
if result == gtk.RESPONSE_OK:
w[axis+"maxvel"].set_value( get_value(w.xtunevel))
w[axis+"maxacc"].set_value( get_value(w.xtuneacc))
w[axis+"P"].set_value( get_value(w.xtunecurrentP))
w[axis+"I"].set_value( get_value(w.xtunecurrentI))
w[axis+"D"].set_value( get_value(w.xtunecurrentD))
w[axis+"FF0"].set_value( get_value(w.xtunecurrentFF0))
w[axis+"FF1"].set_value( get_value(w.xtunecurrentFF1))
w[axis+"FF2"].set_value( get_value(w.xtunecurrentFF2))
w[axis+"bias"].set_value( get_value(w.xtunecurrentbias))
w[axis+"deadband"].set_value( get_value(w.xtunecurrentdeadband))
w[axis+"bias"].set_value(w.xtunecurrentbias.get_value())
w[axis+"steptime"].set_value(get_value(w.xtunecurrentsteptime))
w[axis+"stepspace"].set_value(get_value(w.xtunecurrentstepspace))
w[axis+"dirhold"].set_value(get_value(w.xtunecurrentdirhold))
w[axis+"dirsetup"].set_value(get_value(w.xtunecurrentdirsetup))
#w[axis+"invertmotor"].set_active(w.xtuneinvertmotor.get_active())
#w[axis+"invertencoder"].set_active(w.xtuneinvertencoder.get_active())
halrun.write("sets enable false\n")
time.sleep(.001)
halrun.close()
self.w['window1'].set_sensitive(1)
def update_tune_test_params(self, *args):
axis = self.axis_under_tune
if axis is None or not self.updaterunning: return
temp = not self. w.xtunerun.get_active()
#self.w.xtuneinvertmotor.set_sensitive( temp)
self.w.xtuneamplitude.set_sensitive( temp)
self.w.xtunedir.set_sensitive( temp)
self.w.xtunejogminus.set_sensitive(temp)
self.w.xtunejogplus.set_sensitive(temp)
temp = self.w.xtuneenable.get_active()
if not self.w.xtunerun.get_active():
self.w.xtunejogminus.set_sensitive(temp)
self.w.xtunejogplus.set_sensitive(temp)
self.w.xtunerun.set_sensitive(temp)
halrun = self.halrun
if self.stepgen:
halrun.write("""
setp pid.0.Pgain %(p)f
setp pid.0.Igain %(i)f
setp pid.0.Dgain %(d)f
setp pid.0.bias %(bias)f
setp pid.0.FF0 %(ff0)f
setp pid.0.FF1 %(ff1)f
setp pid.0.FF2 %(ff2)f
setp pid.0.bias %(bias)f
setp pid.0.deadband %(deadband)f
setp %(stepgen)s.steplen %(len)d
setp %(stepgen)s.stepspace %(space)d
setp %(stepgen)s.dirhold %(hold)d
setp %(stepgen)s.dirsetup %(setup)d
setp %(stepgen)s.maxaccel %(accelps)f
setp %(stepgen)s.maxvel %(velps)f
setp %(stepgen)s.position-scale %(scale)f
setp axistest.0.jog-minus %(jogminus)s
setp axistest.0.jog-plus %(jogplus)s
setp axistest.0.run %(run)s
setp axistest.0.amplitude %(amplitude)f
setp simple-tp.0.maxvel %(vel)f
setp simple-tp.0.maxaccel %(accel)f
setp axistest.0.dir %(dir)s
setp axistest.0.pause %(pause)d
sets enable %(enable)s
sets estop-out %(estop)s
""" % {
'p':self.w.xtunecurrentP.get_value(),
'i':self.w.xtunecurrentI.get_value(),
'd':self.w.xtunecurrentD.get_value(),
'ff0':self.w.xtunecurrentFF0.get_value(),
'ff1':self.w.xtunecurrentFF1.get_value(),
'ff2':self.w.xtunecurrentFF2.get_value(),
'bias':self.w.xtunecurrentbias.get_value(),
'deadband':self.w.xtunecurrentdeadband.get_value(),
'scale':self.scale,
'len':self.w.xtunecurrentsteptime.get_value(),
'space':self.w.xtunecurrentstepspace.get_value(),
'hold':self.w.xtunecurrentdirhold.get_value(),
'setup':self.w.xtunecurrentdirsetup.get_value(),
'stepgen': self.step_signalname,
'jogminus': self.tunejogminus,
'jogplus': self.tunejogplus,
'run': self.w.xtunerun.get_active(),
'amplitude': self.w.xtuneamplitude.get_value(),
'accel': self.w.xtuneacc.get_value(),
'accelps': self.w.xtuneacc.get_value()*1.25,
'velps': self.w.xtunevel.get_value()/60*1.25,
'vel': (self.w.xtunevel.get_value()/60),
'dir': self.w.xtunedir.get_active(),
'pause':int(self.w.xtunepause.get_value()),
'enable':self.w.xtuneenable.get_active(),
'estop':(self.w.xtuneenable.get_active())
})
else:
halrun.write("""
setp pid.0.Pgain %(p)f
setp pid.0.Igain %(i)f
setp pid.0.Dgain %(d)f
setp pid.0.bias %(bias)f
setp pid.0.FF0 %(ff0)f
setp pid.0.FF1 %(ff1)f
setp pid.0.FF2 %(ff2)f
setp pid.0.bias %(bias)f
setp pid.0.deadband %(deadband)f
setp axistest.0.jog-minus %(jogminus)s
setp axistest.0.jog-plus %(jogplus)s
setp axistest.0.run %(run)s
setp axistest.0.amplitude %(amplitude)f
setp axistest.0.dir %(dir)s
setp axistest.0.pause %(pause)d
setp simple-tp.0.maxvel %(vel)f
setp simple-tp.0.maxaccel %(accel)f
sets enable %(enable)s
sets estop-out %(estop)s
""" % {
'p':self.w.xtunecurrentP.get_value(),
'i':self.w.xtunecurrentI.get_value(),
'd':self.w.xtunecurrentD.get_value(),
'ff0':self.w.xtunecurrentFF0.get_value(),
'ff1':self.w.xtunecurrentFF1.get_value(),
'ff2':self.w.xtunecurrentFF2.get_value(),
'bias':self.w.xtunecurrentbias.get_value(),
'deadband':self.w.xtunecurrentdeadband.get_value(),
'jogminus': self.tunejogminus,
'jogplus': self.tunejogplus,
'run': self.w.xtunerun.get_active(),
'amplitude': self.w.xtuneamplitude.get_value(),
'accel': self.w.xtuneacc.get_value(),
'vel': self.w.xtunevel.get_value(),
'velps': (self.w.xtunevel.get_value()/60),
'dir': self.w.xtunedir.get_active(),
'pause':int(self.w.xtunepause.get_value()),
'enable':self.w.xtuneenable.get_active(),
'estop':(self.w.xtuneenable.get_active())
})
if self.encoder:
halrun.write("""
setp %(encoder)s.scale %(enc_scale)d
""" % {
'encoder':self.enc_signalname,
' enc_scale':self.enc_scale,
})
halrun.flush()
def tune_jogminus(self, direction):
self.tunejogminus = direction
self.update_tune_test_params()
def tune_jogplus(self, direction):
self.tunejogplus = direction
self.update_tune_test_params()
def toggle_tuneinvertmotor(self):
def get_value(d):
return self.a.get_value(d)
axis = self.axis_under_tune
w = self.w
if w.xtuneinvertmotor.get_active():
self.scale = get_value(w[axis + "stepscale"]) * -1
else:
self.scale = get_value(w[axis + "stepscale"])
if w.xtuneinvertencoder.get_active():
self.enc_scale = get_value(w[axis + "encoderscale"]) * -1
else:
self.enc_scale = get_value(w[axis + "encoderscale"])
self.update_tune_test_params()
    # openloop servo test
    def test_axis(self, axis):
        """Run the interactive open-loop test dialog for one axis.

        Builds a temporary halrun session wired to the axis' PWM (or sserial
        pot) output and encoder/resolver feedback, shows the openloopdialog,
        and on OK copies the invert/scale results back into the main widgets.
        'axis' is one of "x","y","z","a","s".
        """
        def get_value(d):
            return self.a.get_value(d)
        # can't test with a simulator
        if not self.a.check_for_rt():
            return
        # one needs real time, pwm gen and an encoder for open loop testing.
        # resolve the axis' signals to real HAL pin names (empty/None if unset)
        temp = self.d.findsignal( (axis + "-encoder-a"))
        self.enc = self.d.make_pinname(temp)
        temp = self.d.findsignal( (axis + "-resolver"))
        self.res = self.d.make_pinname(temp)
        pwm_sig = self.d.findsignal( (axis + "-pwm-pulse"))
        self.pwm = self.d.make_pinname(pwm_sig)
        pot_sig = self.d.findsignal(axis+"-pot-output")
        self.pot = self.d.make_pinname(pot_sig)
        # the spindle may use a pot instead of a PWM gen; other axes need PWM
        if axis == "s":
            if (not self.pwm and not self.pot) and (not self.enc and not self.res):
                self.a.warning_dialog( _(" You must designate a ENCODER / RESOLVER signal and an ANALOG SPINDLE signal for this axis test") , True)
                return
        else:
            if not self.pwm or (not self.enc and not self.res) :
                self.a.warning_dialog( _(" You must designate a ENCODER / RESOLVER signal and a PWM signal for this axis test") , True)
                return
        # open an interactive halcmd session; all commands below are piped to it
        self.halrun = halrun = os.popen("halrun -Is > /dev/null", "w")
        if debug:
            halrun.write("echo\n")
        data = self.d
        widgets = self.w
        axnum = "xyzas".index(axis)
        pump = False
        fastdac = get_value(widgets["fastdac"])
        slowdac = get_value(widgets["slowdac"])
        dacspeed = widgets.Dac_speed_fast.get_active()
        dac_scale = get_value(widgets[axis+"outputscale"])
        max_dac = get_value(widgets[axis+"maxoutput"])
        if axis == "s":
            pwmmaxlimit = get_value(widgets.soutputscale)
            max_voltage_factor = 10.0/get_value(widgets.soutputmaxvoltage) # voltagelimit
            pwmmaxoutput = pwmmaxlimit * max_voltage_factor
            if widgets.susenegativevoltage.get_active():
                pwmminlimit = -pwmmaxlimit
            else:
                pwmminlimit = 0
        else:
            pwmminlimit = get_value(widgets[axis+"outputminlimit"])
            pwmmaxlimit = get_value(widgets[axis+"outputmaxlimit"])
            pwmmaxoutput = get_value(widgets[axis+"outputscale"])
        enc_scale = get_value(widgets[axis+"encoderscale"])
        pump = self.d.findsignal("charge-pump")
        print 'fast %d,max %d, ss max %d, dac_scale %d'%(fastdac,max_dac,pwmmaxoutput,dac_scale)
        # build the realtime environment: threads, hostmot2 driver, halscope
        halrun.write("loadrt threads period1=%d name1=base-thread fp1=0 period2=%d name2=servo-thread \n" % (100000, self.d.servoperiod ))
        load,read,write = self.a.hostmot2_command_string()
        for i in load:
            halrun.write('%s\n'%i)
        halrun.write("loadusr halscope\n")
        for i in read:
            halrun.write('%s\n'%i)
        if pump:
            halrun.write( "loadrt charge_pump\n")
            halrun.write( "net enable charge-pump.enable\n")
            halrun.write( "net charge-pump <= charge-pump.out\n")
            halrun.write( "addf charge-pump servo-thread\n")
        for i in write:
            halrun.write('%s\n'%i)
        halrun.write( "newsig estop-out bit\n")
        halrun.write( "sets estop-out false\n")
        halrun.write( "newsig enable-not bit\n")
        halrun.write( "newsig dir-not bit\n")
        halrun.write( "newsig dir bit\n")
        # search for pins with test signals that may be needed to enable amp
        self.hal_test_signals(axis)
        # setup sserial potentiometer
        if self.pot:
            halrun.write("net dac " + self.pot + "spinout\n")
            halrun.write("net enable " + self.pot +"spinena\n")
            halrun.write("net dir " + self.pot +"spindir\n")
            halrun.write("setp "+self.pot+"spinout-minlim %.1f\n"% pwmminlimit)
            halrun.write("setp "+self.pot+"spinout-maxlim %.1f\n"% pwmmaxlimit)
            halrun.write("setp "+self.pot+"spinout-scalemax %.1f\n"% pwmmaxoutput)
            potinvertlist = self.a.spindle_invert_pins(pot_sig)
            for i in potinvertlist:
                if i == _PD.POTO:
                    halrun.write("setp "+self.pot+"spindir-invert true\n")
                if i == _PD.POTE:
                    halrun.write("setp "+self.pot+"spinena-invert true\n")
        # setup pwm generator
        if self.pwm:
            if "pwm" in self.pwm: # mainboard PWM
                pwmtype = self.d[pwm_sig+"type"]
                if pwmtype == _PD.PWMP: pulsetype = 1
                elif pwmtype == _PD.PDMP: pulsetype = 3
                elif pwmtype == _PD.UDMU: pulsetype = 2
                else:
                    print "**** ERROR PNCCONF- PWM type not recognized in open loop test"
                    return
                halrun.write("setp %s %d \n"% (self.pwm +".output-type", pulsetype))
                halrun.write("net enable %s \n"% (self.pwm +".enable"))
                # NOTE: the format args are split oddly here but the emitted
                # command is a normal "setp <pin>.scale <value>" line
                halrun.write("setp %s \n"% (self.pwm +".scale %f"% dac_scale))
                ending = ".value"
                pwminvertlist = self.a.pwmgen_invert_pins(pwm_sig)
                for i in pwminvertlist:
                    halrun.write("setp "+i+".invert_output true\n")
            else: # sserial PWM
                pwm_enable = self.d.make_pinname(pwm_sig,False,True) # get prefix only
                if 'analogout5' in self.pwm:
                    enable ='spinena'
                else:
                    enable ='analogena'
                halrun.write("net enable %s \n"% (pwm_enable + enable))
                halrun.write("setp "+self.pwm+"-minlim %.1f\n"% pwmminlimit)
                halrun.write("setp "+self.pwm+"-maxlim %.1f\n"% pwmmaxlimit)
                halrun.write("setp "+self.pwm+"-scalemax %.1f\n"% pwmmaxoutput)
                ending = ""
            halrun.write("net dac %s \n"% (self.pwm + ending))
            halrun.write("loadusr halmeter -s pin %s -g 550 500 330\n"% (self.pwm + ending))
            halrun.write("loadusr halmeter pin %s -g 550 375\n"% (self.pwm + ending) )
            halrun.write("loadusr halmeter -s sig enable -g 0 475 330\n")
        # set up encoder
        if self.enc:
            print self.enc
            halrun.write("net enc-reset %s \n"% (self.enc +".reset"))
            halrun.write("setp %s.scale %f \n"% (self.enc, enc_scale))
            halrun.write("setp %s \n"% (self.enc +".filter true"))
            # NOTE(review): counter-mode is set from a bool's str() ("True"/
            # "False"); halcmd appears to accept this — confirm
            halrun.write("setp %s.counter-mode %s\n"% (self.enc, self.w.ssingleinputencoder.get_active()))
            halrun.write("loadusr halmeter -s pin %s -g 550 550 330\n"% (self.enc +".position"))
            halrun.write("loadusr halmeter -s pin %s -g 550 600 330\n"% (self.enc +".velocity"))
        # set up resolver
        if self.res:
            halrun.write("net resolver-reset %s \n"% (self.res +".reset"))
            halrun.write("setp %s.scale %f \n"% (self.res, enc_scale))
        # prime the dialog widgets from the current configuration
        widgets.openloopdialog.set_title(_("%s Axis Test") % axis.upper())
        widgets.openloopdialog.move(550,0)
        self.jogplus = self.jogminus = self.enc_reset = self.res_reset = self.enable_amp = 0
        self.axis_under_test = axis
        widgets.testinvertmotor.set_active(widgets[axis+"invertmotor"].get_active())
        widgets.testinvertencoder.set_active(widgets[axis+"invertencoder"].get_active())
        widgets.testenc_scale.set_value(float(enc_scale))
        widgets.fastdac.set_range(0,dac_scale)
        widgets.slowdac.set_range(0,dac_scale)
        self.update_axis_params()
        halrun.write("start\n"); halrun.flush()
        # run the modal dialog with the main window locked out
        self.w['window1'].set_sensitive(0)
        self.w.jogminus.set_sensitive(0)
        self.w.jogplus.set_sensitive(0)
        widgets.openloopdialog.show_all()
        result = widgets.openloopdialog.run()
        widgets.openloopdialog.hide()
        time.sleep(.001)
        halrun.close()
        if result == gtk.RESPONSE_OK:
            # copy the tested values back into the configuration widgets
            #widgets[axis+"maxacc"].set_text("%s" % widgets.testacc.get_value())
            widgets[axis+"invertmotor"].set_active(widgets.testinvertmotor.get_active())
            widgets[axis+"invertencoder"].set_active(widgets.testinvertencoder.get_active())
            widgets[axis+"encoderscale"].set_value(widgets.testenc_scale.get_value())
            #widgets[axis+"maxvel"].set_text("%s" % widgets.testvel.get_value())
        self.axis_under_test = None
        self.w['window1'].set_sensitive(1)
    def update_axis_params(self, *args):
        """Push the open-loop test dialog state into the running HAL session.

        Computes the DAC output from the fast/slow spinners, jog buttons and
        invert toggles, then writes the enable/dir/dac signal values to
        self.halrun.  No-op when no axis is under test.
        """
        def get_value(d):
            return self.a.get_value(d)
        axis = self.axis_under_test
        if axis is None: return
        halrun = self.halrun
        enc_scale = self.w.testenc_scale.get_value()
        if self.w.testinvertencoder.get_active() == True:
            enc_invert = -1
        else:
            enc_invert = 1
        if self.w.Dac_speed_fast.get_active() == True:
            output = get_value(self.w.fastdac)
        else:
            output = get_value(self.w.slowdac)
        # jog-minus drives negative; neither button pressed means zero output
        if self.jogminus == 1:
            output = output * -1
        elif not self.jogplus == 1:
            output = 0
        invertmotor = self.w.testinvertmotor.get_active()
        output += get_value(self.w.testoutputoffset)
        halrun.write("sets enable %d\n"% ( self.enable_amp))
        halrun.write("sets enable-not %d\n"% ( not(self.enable_amp)))
        halrun.write("sets estop-out %d\n"% ( self.enable_amp))
        if invertmotor:
            output = output * -1
        if self.enc:
            halrun.write("""setp %(scalepin)s.scale %(scale)f\n""" % { 'scalepin':self.enc, 'scale': (enc_scale * enc_invert)})
            halrun.write("""sets enc-reset %(reset)d\n""" % { 'reset': self.enc_reset})
        if self.res:
            halrun.write("""setp %(scalepin)s.scale %(scale)f\n""" % { 'scalepin':self.res, 'scale': (enc_scale * enc_invert)})
            halrun.write("""sets resolver-reset %(reset)d\n""" % { 'reset': self.res_reset})
        if self.pwm:
            halrun.write("""sets dac %(output)f\n""" % { 'output': output})
        if self.pot:
            # pots take magnitude only; direction goes out on the dir signals
            halrun.write("""sets dac %(output)f\n""" % { 'output': abs(output)})
        if output == 0:
            halrun.write("sets dir false\n")
            halrun.write("sets dir-not false\n")
        elif output < 0:
            halrun.write("sets dir true\n")
            halrun.write("sets dir-not false\n")
        else:
            halrun.write("sets dir false\n")
            halrun.write("sets dir-not true\n")
        halrun.flush()
def oloop_jogminus(self, direction):
self.jogminus = direction
self.update_axis_params()
def oloop_jogplus(self, direction):
self.jogplus = direction
self.update_axis_params()
def oloop_resetencoder(self, state):
self.enc_reset = self.res_reset = state
self.update_axis_params()
def oloop_enableamp(self):
self.enable_amp = self.enable_amp * -1 + 1
self.w.jogminus.set_sensitive(self.enable_amp)
self.w.jogplus.set_sensitive(self.enable_amp)
self.update_axis_params()
    def hal_test_signals(self, axis):
        """Connect any configured amp-enable style pins into the test session.

        Scans every configured output pin (mesa mainboard GPIO, sserial and
        parports) for the well-known signal names below and nets them to the
        test session's enable/dir/estop signals so external amplifiers can be
        energized during an axis test.
        """
        # during testing pncconf looks for pins with these signals names
        # and connects to them so as to enable amps etc
        # force-pin-true will just make the pin be true all the time
        # this could be used as a temparary way to enable I/O that the
        # specific machine needs on for the amp to work but pncconf doesn't look for.
        if not axis == "s":
            signallist = ((axis+"-enable"),"machine-is-enabled","estop-out","charge-pump","force-pin-true")
        else:
            signallist = ("spindle-cw","spindle-ccw","spindle-brake","spindle-on","machine-is-enabled",
                "spindle-enable","estop-out","charge-pump","force-pin-true")
        halrun = self.halrun
        # pname: config key for the pin, p: its signal name,
        # i: invert flag, t: pin type (GPIO / open drain / None for parport)
        def write_pins(pname,p,i,t):
            if p in signallist:
                pinname = self.d.make_pinname(pname)
                if pinname:
                    #print p, pname, i
                    # map the user's signal name onto a test-session signal
                    if p == "estop-out": signal = p
                    elif p == "spindle-cw": signal = "dir"
                    elif p == "spindle-ccw": signal = "dir-not"
                    elif p == "spindle-brake": signal = "enable-not"
                    else: signal = "enable"
                    print pinname, p
                    if "parport" in pinname:
                        if p == "force-pin-true":
                            halrun.write("setp %s true\n"% (pinname))
                        else:
                            halrun.write("net %s %s \n"% (signal,pinname))
                    else:
                        if not "sserial" in pname: # mainboard GPIO need to be set to output/opendrain
                            halrun.write("setp %s true\n"% (pinname + ".is_output"))
                            if t == _PD.GPIOD: halrun.write("setp "+pinname+".is_opendrain  true\n")
                        # pick the pin-name suffix for this pin family
                        if "sserial" in pname and "dig" in pinname: ending = ".out" # 7i76 sserial board
                        elif "sserial" in pname: ending = "" # all other sserial
                        elif not "sserial" in pname: ending =".out" # mainboard GPIO
                        if p == "force-pin-true":
                            halrun.write("setp %s true\n"% ((pinname + ending)))
                        else:
                            halrun.write("net %s %s \n"% (signal,(pinname + ending)))
                        if i: # invert pin
                            if "sserial" in pname and "dig" in pinname: ending = ".invert" # 7i76 sserial board
                            elif "sserial" in pname or "parport" in pinname: ending = "-invert"# all other sserial or parport
                            else: ending = ".invert_output" # mainboard GPIO
                            halrun.write("setp %s true\n"%  (pinname + ending ))
            return
        # search everything for multiple same named signal output pins
        # mesa mainboard
        for boardnum in range(0,int(self.d.number_mesa)):
            for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]) :
                for pin in range(0,24):
                    pname = 'mesa%dc%dpin%d' % (boardnum,connector, pin)
                    p = self.d['mesa%dc%dpin%d' % (boardnum,connector, pin)]
                    i = self.d['mesa%dc%dpin%dinv' % (boardnum,connector, pin)]
                    t = self.d['mesa%dc%dpin%dtype' % (boardnum,connector, pin)]
                    if t in (_PD.GPIOO,_PD.GPIOD) and not p == "unused-output":
                        write_pins(pname,p,i,t)
        # mesa sserial
            if self.d["mesa%d_numof_sserialports"% (boardnum)]: # only check if we have sserialports
                port = 0
                for channel in range (0,self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._MAXSSERIALCHANNELS]):
                    if channel >4: break # TODO only have 5 channels worth of glade widgets
                    for pin in range (0,_PD._SSCOMBOLEN):
                        pname = 'mesa%dsserial%d_%dpin%d' % (boardnum,port,channel,pin)
                        p = self.d['mesa%dsserial%d_%dpin%d' % (boardnum,port,channel,pin)]
                        i = self.d['mesa%dsserial%d_%dpin%dinv' % (boardnum,port,channel,pin)]
                        t = self.d['mesa%dsserial%d_%dpin%dtype' % (boardnum,port,channel,pin)]
                        if t in (_PD.GPIOO,_PD.GPIOD) and not p == "unused-output":
                            write_pins(pname,p,i,t)
        # parports
        templist = ("pp1","pp2","pp3")
        for j, k in enumerate(templist):
            if self.d.number_pports < (j+1): break
            for x in (1,2,3,4,5,6,7,8,9,14,16,17):
                pname = "%s_Opin%d" % (k, x)
                p = self.d[pname]
                i = self.d[pname+"_inv"]
                if not p == "unused-output":
                    write_pins(pname,p,i,None)
def launch_mesa_panel(self):
if not self.a.check_for_rt(): return
if not self.a.warning_dialog(_("Do to technical reasons this test panel can be loaded only once without reloading pncconf.\
You also will not be able to do any other testing untill you reload pncconf and quite possibly open a terminal and type 'halrun -U' \
I hesitate to even allow it's use but at times it's very useful.\nDo you wish to continue the test?"),False):
return
self.halrun = os.popen("halrun -Is > /dev/null", "w")
if debug:
halrun.write("echo\n")
self.halrun.write("loadrt threads period1=50000 name1=base-thread fp1=0 period2=1000000 name2=servo-thread\n")
load,read,write= self.a.hostmot2_command_string()
for i in load:
halrun.write('%s\n'%i)
for i in read:
halrun.write('%s\n'%i)
for i in write:
halrun.write('%s\n'%i)
self.halrun.write("start\n")
self.halrun.write("loadusr halmeter\n")
self.halrun.flush()
time.sleep(1)
try:
PyApp(self,self.d,self.w)
except:
self.halrun.close()
a = os.popen("halrun -U > /dev/null", "w")
a.flush()
time.sleep(1)
a.close()
a.kill()
def on_mesapanel_returned(self, *args):
#print "Quit test panel"
try:
self.halrun.write("delsig all\n")
self.halrun.write("exit\n")
self.halrun.flush()
time.sleep(1)
self.halrun.close()
a = os.popen("halrun -U > /dev/null", "w")
a.flush()
time.sleep(1)
a.close()
a.kill()
except :
pass
#***************************************************************
# testpanel code
class hal_interface:
    """Thin holder for the 'testpanel' HAL user component.

    On success self.c is the live hal.component; if creation fails (e.g. a
    previous testpanel component is still loaded) the error is only printed
    and self.c is left unset.
    """
    def __init__(self):
        try:
            self.c = hal.component("testpanel")
        except:
            # component creation failed; panel will be unusable
            print"problem in HAL routine"
class Data2:
    """Attribute bag for the test panel's dynamically named widgets.

    Widgets are stored as attributes but are usually addressed with
    dict-style lookups built from format strings (e.g. d["brd0led5"]),
    so item access is forwarded to getattr/setattr.
    """
    def __init__(self):
        # one independent list per widget family
        for family in ("inv", "swch", "led", "enc", "pwm", "stp"):
            setattr(self, family, [])

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)
class LED(gtk.DrawingArea):
    """Round indicator widget drawn with cairo on a gtk.DrawingArea.

    State is a simple boolean; the on/off fill colors and diameter are
    configurable.  Redraw happens on the widget's expose-event.
    """
    def __init__(self, parent):
        self.par = parent
        super(LED, self).__init__()
        self._dia = 10          # circle radius used when drawing
        self._state = 0         # 0/False = off, 1/True = on
        self._on_color = [0.3, 0.4, 0.6]
        self._off_color = [0.9, 0.1, 0.1]
        self.set_size_request(25, 25)
        self.connect("expose-event", self.expose)

    # This method draws our widget
    # it draws a black circle for a rim around LED
    # Then depending on self.state
    # fills in that circle with on or off color.
    # the dim depends on self.diam
    def expose(self, widget, event):
        cr = widget.window.cairo_create()
        cr.set_line_width(3)
        #cr.set_source_rgb(0, 0, 0.0)
        self.set_size_request(25, 25)
        #cr.set_source_rgb(0, 0, 0.0)
        #self.set_size_request(self._dia*2+5, self._dia*2+5)
        w = self.allocation.width
        h = self.allocation.height
        # draw relative to the widget's center
        cr.translate(w/2, h/2)
        #cr = widget.window.cairo_create()
        lg2 = cairo.RadialGradient(0, 0, 0, 0, 0, self._dia)
        if self._state:
            r = self._on_color[0]
            g = self._on_color[1]
            b = self._on_color[2]
        else:
            r = self._off_color[0]
            g = self._off_color[1]
            b = self._off_color[2]
        # radial gradient: brightened center fading to the base color
        lg2.add_color_stop_rgba(1, r/.25,g/.25,b/.25, 1)
        lg2.add_color_stop_rgba(.5, r,g,b, .5)
        #lg2.add_color_stop_rgba(0, 0, 0, 0, 1)
        cr.arc(0, 0, self._dia, 0, 2*math.pi)
        cr.stroke_preserve()
        #cr.rectangle(20, 20, 300, 100)
        cr.set_source(lg2)
        cr.fill()

        return False

    # This sets the LED on or off
    # and then redraws it
    # Usage: ledname.set_active(True)
    def set_active(self, data2 ):
        self._state = data2
        self.queue_draw()

    # This allows setting of the on and off color
    # Usage: ledname.set_color("off",[r,g,b])
    def set_color(self, state, color = [0,0,0] ):
        if state == "off":
            self._off_color = color
        elif state == "on":
            self._on_color = color
        else:
            # unknown state name: silently ignore
            return

    def set_dia(self, dia):
        self._dia = dia
        self.queue_draw()
class PyApp(gtk.Window):
    """Interactive Mesa test panel window.

    Builds one notebook page per Mesa board/connector with a widget per
    configured pin (switch, LED, encoder, stepgen, PWM), creates matching
    HAL pins on the 'testpanel' component and nets them to the hostmot2
    driver pins.  A 100ms gobject timer polls HAL to refresh the display.
    """

    def switch_callback(self, widget, component , boardnum,number, data=None):
        """Drive the output-switch HAL pin, honoring the invert checkbox."""
        print component,boardnum,number,data

        if component == "switch":
            invrt = self.data2["brd%dinv%d" % (boardnum,number)].get_active()
            if (data and not invrt ) or (not data and invrt):
                self.hal.c["brd.%d.switch.%d"% (boardnum, number)] = True
            else:
                self.hal.c["brd.%d.switch.%d"% (boardnum, number)] = False
        if component == "invert":
            # re-evaluate the output as if the button were released
            self.switch_callback(None,"switch",boardnum,number,False)

    def pwm_callback(self, widget, component , boardnum,number, data=None):
        """Push the PWM slider value and enable checkbox state into HAL."""
        if component == "pwm":
            value = self.data2["brd%dpwm%dadj" % (boardnum,number)].get_value()
            active = self.data2["brd%dpmw_ckbutton%d"% (boardnum,number)].get_active()
            self.hal.c["brd.%d.pwm.%d.enable"% (boardnum, number)] = active
            if active:
                self.hal.c["brd.%d.pwm.%d.value"% (boardnum, number)] = value
            else:
                self.hal.c["brd.%d.pwm.%d.value"% (boardnum, number)] = 0

    def stp_callback(self, widget, component , boardnum,number, data=None):
        """Push the stepgen position command and enable state into HAL."""
        if component == "stp":
            value = self.data2["brd%dstp%dcmd" % (boardnum,number)].get_value()
            active = self.data2["brd%dstp_ckbutton%d"% (boardnum,number)].get_active()
            self.hal.c["brd.%d.stp.%d.enable"% (boardnum, number)] = active
            if active:
                self.hal.c["brd.%d.stp.%d.position-cmd"% (boardnum, number)] = value

    def quit(self,widget):
        """Tear down the panel: stop the timer, exit the HAL component."""
        self.w['window1'].set_sensitive(1)
        gobject.source_remove(self.timer)
        self.hal.c.exit()
        self.app.on_mesapanel_returned()
        return True

    def update(self):
        """Timer callback: refresh LEDs and encoder counts from HAL.

        Returns True to keep the gobject timeout alive while the testpanel
        component exists, False to cancel it once the component is gone.
        """
        if hal.component_exists("testpanel"):
            for i in (0,1):
                for j in range(0,72):
                    try:
                        self.data2["brd%dled%d"%(i,j)].set_active(self.hal.c["brd.%d.led.%d"% (i,j)])
                    except :
                        # widget/pin not present for this board - skip
                        continue
                for k in range(0,16):
                    try:
                        self.data2["brd%denc%dcount"%(i,k)].set_text("%s"% str(self.hal.c["brd.%d.enc.%d.count"% (i,k)]))
                    except :
                        continue
            return True # keep running this event
        else:
            return False # kill the event

    # This creates blank labels for placemarks for components
    # such as encoders that use 3 or 4 pins as input
    # but only need one line for user interaction
    # this keeps the page uniform
    def make_blank(self,container,boardnum,number):
        #blankname = "enc-%d" % (number)
        #self.data2["brd%denc%d" % (boardnum,number)]= gtk.Button("Reset-%d"% number)
        #self.hal.c.newpin(encname, hal.HAL_S32, hal.HAL_IN)
        label = gtk.Label("     ")
        container.pack_start(label, False, False, 10)
        label = gtk.Label("      ")
        container.pack_start(label, False, False, 10)

    # This creates widgets and HAL pins for encoder controls
    def make_enc(self,container,boardnum,number):
        """Add a reset button and count label; create reset/count HAL pins."""
        encname = "brd.%d.enc.%d.reset" % (boardnum,number)
        print"making HAL pin enc bit Brd %d,num %d"%(boardnum,number)
        self.hal.c.newpin(encname, hal.HAL_BIT, hal.HAL_OUT)
        hal.new_sig(encname+"-signal",hal.HAL_BIT)
        hal.connect("testpanel."+encname,encname+"-signal")
        self.data2["brd%denc%dreset" % (boardnum,number)]= gtk.Button("Reset-%d"% number)
        container.pack_start(self.data2["brd%denc%dreset" % (boardnum,number)], False, False, 10)
        encname = "brd.%d.enc.%d.count" % (boardnum,number)
        print"making HAL pin enc s32 brd %d num %d"%(boardnum,number)
        self.hal.c.newpin(encname, hal.HAL_S32, hal.HAL_IN)
        hal.new_sig(encname+"-signal",hal.HAL_S32)
        hal.connect("testpanel."+encname,encname+"-signal")
        label = self.data2["brd%denc%dcount" % (boardnum,number)] = gtk.Label("Encoder-%d"% (number))
        label.set_size_request(100, -1)
        container.pack_start(label, False, False, 10)

    # This creates widgets and HAL pins for stepper controls
    def make_stp(self,container,boardnum,number):
        """Add a position spinbox and enable checkbox; create stepgen HAL pins."""
        stpname = "brd.%d.stp.%d.position-cmd" % (boardnum,number)
        self.hal.c.newpin(stpname, hal.HAL_FLOAT, hal.HAL_OUT)
        hal.new_sig(stpname+"-signal",hal.HAL_FLOAT)
        hal.connect("testpanel."+stpname,stpname+"-signal")
        stpname = "brd.%d.stp.%d.enable" % (boardnum,number)
        self.hal.c.newpin(stpname, hal.HAL_BIT, hal.HAL_OUT)
        hal.new_sig(stpname+"-signal",hal.HAL_BIT)
        hal.connect("testpanel."+stpname,stpname+"-signal")
        adj = gtk.Adjustment(0.0, -1000.0, 1000.0, 1.0, 5.0, 0.0)
        spin = self.data2["brd%dstp%dcmd" % (boardnum,number)]= gtk.SpinButton(adj, 0, 1)
        adj.connect("value_changed", self.stp_callback,"stp",boardnum,number,None)
        container.pack_start(spin, False, False, 10)
        ckb = self.data2["brd%dstp_ckbutton%d"% (boardnum,number)] = gtk.CheckButton("Enable %d"% (number))
        ckb.connect("toggled", self.stp_callback, "stp",boardnum,number,None)
        container.pack_start(ckb, False, False, 10)

    # This places a spinbox for pwm value and a checkbox to enable pwm
    # It creates two HAL pins
    def make_pwm(self,container,boardnum,number):
        """Add a PWM value slider and on/off checkbox; create value/enable pins."""
        pwmname = "brd.%d.pwm.%d.value" % (boardnum,number)
        print"making HAL pin pwm float brd%d num %d"%(boardnum,number)
        self.hal.c.newpin(pwmname, hal.HAL_FLOAT, hal.HAL_OUT)
        hal.new_sig(pwmname+"-signal",hal.HAL_FLOAT)
        hal.connect("testpanel."+pwmname,pwmname+"-signal")
        pwmname = "brd.%d.pwm.%d.enable" % (boardnum,number)
        print"making HAL pin pwm bit brd %d num %d"%(boardnum,number)
        self.hal.c.newpin(pwmname, hal.HAL_BIT, hal.HAL_OUT)
        hal.new_sig(pwmname+"-signal",hal.HAL_BIT)
        hal.connect("testpanel."+pwmname,pwmname+"-signal")
        adj = self.data2["brd%dpwm%dadj" % (boardnum,number)] = gtk.Adjustment(0.0, -10.0, 10.0, 0.1, 0.5, 0.0)
        adj.connect("value_changed", self.pwm_callback,"pwm",boardnum,number,None)
        pwm = self.data2["brd%dpwm%d" % (boardnum,number)] = gtk.HScale(adj)
        pwm.set_digits(1)
        pwm.set_size_request(100, -1)
        container.pack_start(pwm, False, False, 10)
        ckb = self.data2["brd%dpmw_ckbutton%d"% (boardnum,number)] = gtk.CheckButton("PWM-%d\nON"% (number))
        ckb.connect("toggled", self.pwm_callback, "pwm",boardnum,number,None)
        container.pack_start(ckb, False, False, 10)

    # This places a LED and a label in specified container
    # it specifies the led on/off colors
    # and creates a HAL pin
    def make_led(self,container,boardnum,number):
        ledname = "brd.%d.led.%d" % (boardnum,number)
        print"making HAL pin led bit brd %d num %d"%(boardnum,number)
        self.hal.c.newpin(ledname, hal.HAL_BIT, hal.HAL_IN)
        hal.new_sig(ledname+"-signal",hal.HAL_BIT)
        hal.connect("testpanel."+ledname,ledname+"-signal")
        led = self.data2["brd%dled%d" % (boardnum,number)] = LED(self)
        led.set_color("off",[1,0,0]) # red
        led.set_color("on",[0,1,0]) # Green
        container.pack_start(led, False, False, 10)
        label = gtk.Label("<--GPIO-%d"% (number))
        container.pack_start(label, False, False, 10)

    # This is for placing a button (switch) and an invert check box into
    # a specified container. It also creates the HAL pin
    # and connects some signals.
    def make_switch(self,container,boardnum,number):
        # make a HAL pin
        switchname = "brd.%d.switch.%d" % (boardnum,number)
        print"making HAL pin switch bit brd %d num %d"%(boardnum,number)
        self.hal.c.newpin(switchname, hal.HAL_BIT, hal.HAL_OUT)
        hal.new_sig(switchname+"-signal",hal.HAL_BIT)
        hal.connect("testpanel."+switchname,switchname+"-signal")
        # add button to container using boarnum and number as a reference
        button = self.data2["brd%dswch%d" % (boardnum,number)]= gtk.Button("OUT-%d"% number)
        container.pack_start(button, False, False, 10)
        # connect signals
        button.connect("pressed", self.switch_callback, "switch",boardnum,number,True)
        button.connect("released", self.switch_callback, "switch",boardnum,number,False)
        # add invert switch
        ckb = self.data2["brd%dinv%d" % (boardnum,number)]= gtk.CheckButton("Invert")
        container.pack_start(ckb, False, False, 10)
        ckb.connect("toggled", self.switch_callback, "invert",boardnum,number,None)

    def __init__(self,App,data,widgets):
        """Build the whole panel: one notebook page per board connector."""
        super(PyApp, self).__init__()
        #print "init super pyapp"
        self.data2 = Data2()
        self.d = data
        self.app = App
        self.w = widgets
        #self.halrun = self.app.halrun
        #print "entering HAL init"
        self.hal = hal_interface()
        #print "done HAL init"
        self.set_title("Mesa Test Panel")
        self.set_size_request(450, 450)
        self.set_position(gtk.WIN_POS_CENTER)
        self.connect_after("destroy", self.quit)
        # poll HAL every 100ms to refresh LEDs / encoder counts
        self.timer = gobject.timeout_add(100, self.update)
        #print "added timer"
        brdnotebook = gtk.Notebook()
        brdnotebook.set_tab_pos(gtk.POS_TOP)
        brdnotebook.show()
        self.add(brdnotebook)

        for boardnum in range(0,int(self.d.number_mesa)):
            board = self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._BOARDNAME]+".%d"% boardnum
            self.data2["notebook%d"%boardnum] = gtk.Notebook()
            self.data2["notebook%d"%boardnum].set_tab_pos(gtk.POS_TOP)
            self.data2["notebook%d"%boardnum].show()
            label = gtk.Label("Mesa Board Number %d"% (boardnum))
            brdnotebook.append_page(self.data2["notebook%d"%boardnum], label)
            for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]) :
                # 24 pins per connector laid out in two 12-row columns
                table = gtk.Table(12, 3, False)
                seperator = gtk.VSeparator()
                table.attach(seperator, 1, 2, 0, 12,True)
                for pin in range (0,24):
                    if pin >11:
                        column = 2
                        adjust = -12
                    else:
                        column = 0
                        adjust = 0
                    firmptype,compnum = self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._STARTOFDATA+pin+(concount*24)]
                    pinv = 'mesa%dc%dpin%dinv' % (boardnum,connector,pin)
                    ptype = 'mesa%dc%dpin%dtype' % (boardnum,connector,pin)
                    pintype = self.w[ptype].get_active_text()
                    pininv = self.w[pinv].get_active()
                    truepinnum = (concount*24) + pin
                    # for output / open drain pins
                    if  pintype in (_PD.GPIOO,_PD.GPIOD):
                        h = gtk.HBox(False,2)
                        self.make_switch(h,boardnum,truepinnum)
                        table.attach(h, 0 + column, 1 + column, pin + adjust, pin +1+ adjust,True)
                        hal.set_p("hm2_%s.gpio.%03d.is_output"% (board,truepinnum ),"true")
                        if pininv:  hal.set_p("hm2_%s.gpio.%03d.invert_output"% (board,truepinnum ),"true")
                        hal.connect("hm2_%s.gpio.%03d.out"% (board,truepinnum ),"brd.%d.switch.%d-signal" % (boardnum,truepinnum))
                    # for input pins
                    elif pintype == _PD.GPIOI:
                        h = gtk.HBox(False,2)
                        self.make_led(h,boardnum,truepinnum)
                        table.attach(h, 0 + column, 1 + column, pin + adjust, pin +1+ adjust,True)
                        if pininv: hal.connect("hm2_%s.gpio.%03d.in_not"% (board,truepinnum),"brd.%d.led.%d-signal"% (boardnum,truepinnum))
                        else:   hal.connect("hm2_%s.gpio.%03d.in"% (board,truepinnum),"brd.%d.led.%d-signal"% (boardnum,truepinnum))
                    # for encoder pins
                    elif pintype in (_PD.ENCA,_PD.ENCB,_PD.ENCI,_PD.ENCM):
                        h = gtk.HBox(False,2)
                        if pintype == _PD.ENCA:
                            # only the A phase gets real controls; B/I/M get blanks
                            self.make_enc(h,boardnum,compnum)
                            hal.connect("hm2_%s.encoder.%02d.reset"% (board,compnum), "brd.%d.enc.%d.reset-signal"% (boardnum,compnum))
                            hal.connect("hm2_%s.encoder.%02d.count"% (board,compnum), "brd.%d.enc.%d.count-signal"% (boardnum,compnum))
                        else:
                            self.make_blank(h,boardnum,compnum)
                        table.attach(h, 0 + column, 1 + column, pin + adjust, pin +1+ adjust,True)
                    # for PWM pins
                    elif pintype in (_PD.PWMP,_PD.PWMD,_PD.PWME,_PD.PDMP,_PD.PDMD,_PD.PDME,_PD.UDMD,_PD.UDME):
                        h = gtk.HBox(False,2)
                        # NOTE(review): the inner check includes _PD.UDMU but the
                        # outer elif does not, so UDMU pins fall through to the
                        # error branch below - looks like a latent bug; confirm
                        if pintype in (_PD.PWMP,_PD.PDMP,_PD.UDMU):
                            self.make_pwm(h,boardnum,compnum)
                            hal.connect("hm2_%s.pwmgen.%02d.enable"% (board,compnum),"brd.%d.pwm.%d.enable-signal"% (boardnum,compnum))
                            hal.connect("hm2_%s.pwmgen.%02d.value"% (board,compnum),"brd.%d.pwm.%d.value-signal"% (boardnum,compnum))
                            hal.set_p("hm2_%s.pwmgen.%02d.scale"% (board,compnum),"10")
                        else:
                            self.make_blank(h,boardnum,compnum)
                        table.attach(h, 0 + column, 1 + column, pin + adjust, pin +1+ adjust,True)
                    # for Stepgen pins
                    elif pintype in (_PD.STEPA,_PD.STEPB):
                        h = gtk.HBox(False,2)
                        if pintype == _PD.STEPA:
                            self.make_stp(h,boardnum,compnum)
                            hal.connect("hm2_%s.stepgen.%02d.enable"% (board,compnum),"brd.%d.stp.%d.enable-signal"% (boardnum,compnum))
                            hal.connect("hm2_%s.stepgen.%02d.position-cmd"% (board,compnum),"brd.%d.stp.%d.position-cmd-signal"% (boardnum,compnum))
                            hal.set_p("hm2_%s.stepgen.%02d.maxaccel"% (board,compnum),"0")
                            hal.set_p("hm2_%s.stepgen.%02d.maxvel"% (board,compnum),"2000")
                            hal.set_p("hm2_%s.stepgen.%02d.steplen"% (board,compnum),"2000")
                            hal.set_p("hm2_%s.stepgen.%02d.stepspace"% (board,compnum),"2000")
                            hal.set_p("hm2_%s.stepgen.%02d.dirhold"% (board,compnum),"2000")
                            hal.set_p("hm2_%s.stepgen.%02d.dirsetup"% (board,compnum),"2000")
                        else:
                            self.make_blank(h,boardnum,compnum)
                        table.attach(h, 0 + column, 1 + column, pin + adjust, pin +1+ adjust,True)
                    else:
                        print "pintype error IN mesa test panel method pintype %s boardnum %d connector %d pin %d"% (pintype,boardnum,connector,pin)
                label = gtk.Label("Mesa %d-Connector %d"% (boardnum,connector))
                self.data2["notebook%d"%boardnum].append_page(table, label)
        self.show_all()
        self.w['window1'].set_sensitive(0)
        self.hal.c.ready()
        #print "got to end of panel"
# testpanel code end
#****************************************************************
| gpl-2.0 |
Rendaw/luxem-python | luxem/write.py | 1 | 1827 | import _luxem
from luxem.struct import Typed
class _ArrayElement(object):
def __init__(self, item):
self.item_iter = iter(item)
def step(self, writer, stack):
try:
next_child = next(self.item_iter)
writer._process(stack, next_child)
return True
except StopIteration:
writer.array_end()
return False
class _ObjectElement(object):
def step(self, writer, stack):
try:
next_child = next(self.item_iter)
writer.key(next_child[0])
writer._process(stack, next_child[1])
return True
except StopIteration:
writer.object_end()
return False
if hasattr(dict, 'iteritems'):
def init(self, item):
self.item_iter = item.iteritems()
_ObjectElement.__init__ = init
else:
def init(self, item):
self.item_iter = iter(item.items())
_ObjectElement.__init__ = init
class Writer(_luxem.Writer):
    """Serializes Python values (dicts, lists, Typed wrappers, primitives)
    to luxem via the underlying _luxem.Writer primitives."""

    def _process(self, stack, item):
        # Containers are opened immediately but their children are deferred
        # to the driver loop in element(); everything else is written now.
        if isinstance(item, dict):
            self.object_begin()
            stack.append(_ObjectElement(item))
        elif isinstance(item, list):
            self.array_begin()
            stack.append(_ArrayElement(item))
        elif isinstance(item, Typed):
            self.type(item.name)
            self._process(stack, item.value)
        else:
            self.primitive(str(item))

    def element(self, data):
        """Serialize one complete value; returns self for chaining."""
        pending = []
        self._process(pending, data)
        while pending:
            # Re-read pending[-1] on every pass: step() may push a nested
            # container which must then be drained before its parent.
            while pending[-1].step(self, pending):
                pass
            pending.pop()
        return self
def dump(dest, value, **kwargs):
    """Serialize ``value`` as luxem to the writable target ``dest``."""
    writer = Writer(target=dest, **kwargs)
    writer.element(value)
def dumps(value, **kwargs):
    """Serialize ``value`` to a luxem string and return it."""
    writer = Writer(**kwargs)
    return writer.element(value).dump()
| bsd-2-clause |
Vizerai/grpc | src/python/grpcio_health_checking/health_commands.py | 9 | 2000 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the GRPC Python setup process."""
import os
import shutil
import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto')
class CopyProtoModules(setuptools.Command):
    """Command to copy proto modules from grpc/src/proto."""

    description = ''
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Copy health.proto into the package tree when the source checkout
        # provides it; otherwise do nothing (e.g. when building from sdist).
        if not os.path.isfile(HEALTH_PROTO):
            return
        destination = os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto')
        shutil.copyfile(HEALTH_PROTO, destination)
class BuildPackageProtos(setuptools.Command):
    """Command to generate project *_pb2.py modules from proto files."""

    description = 'build grpc protobuf modules'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # The proto generator accepts exactly one 'include' directory; by
        # convention it is the '' entry of package_dir (a KeyError here
        # means the project layout is wrong).
        from grpc_tools import command
        package_root = self.distribution.package_dir['']
        command.build_package_protos(package_root)
| apache-2.0 |
ecolitan/fatics | venv/lib/python2.7/site-packages/twisted/application/internet.py | 2 | 12569 | # -*- test-case-name: twisted.application.test.test_internet,twisted.test.test_application,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor-based Services
Here are services to run clients, servers and periodic services using
the reactor.
If you want to run a server service, L{StreamServerEndpointService} defines a
service that can wrap an arbitrary L{IStreamServerEndpoint
<twisted.internet.interfaces.IStreamServerEndpoint>}
as an L{IService}. See also L{twisted.application.strports.service} for
constructing one of these directly from a descriptive string.
Additionally, this module (dynamically) defines various Service subclasses that
let you represent clients and servers in a Service hierarchy. Endpoints APIs
should be preferred for stream server services, but since those APIs do not yet
exist for clients or datagram services, many of these are still useful.
They are as follows::
TCPServer, TCPClient,
UNIXServer, UNIXClient,
SSLServer, SSLClient,
UDPServer,
UNIXDatagramServer, UNIXDatagramClient,
MulticastServer
These classes take arbitrary arguments in their constructors and pass
them straight on to their respective reactor.listenXXX or
reactor.connectXXX calls.
For example, the following service starts a web server on port 8080:
C{TCPServer(8080, server.Site(r))}. See the documentation for the
reactor.listen/connect* methods for more information.
"""
from twisted.python import log
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.versions import Version
from twisted.application import service
from twisted.internet import task
from twisted.internet.defer import CancelledError
def _maybeGlobalReactor(maybeReactor):
"""
@return: the argument, or the global reactor if the argument is C{None}.
"""
if maybeReactor is None:
from twisted.internet import reactor
return reactor
else:
return maybeReactor
class _VolatileDataService(service.Service):
    """
    Service base class whose attributes named in C{volatile} are dropped
    from the pickled state (they are rebuilt on the next start).
    """

    volatile = []

    def __getstate__(self):
        state = service.Service.__getstate__(self)
        for name in self.volatile:
            # Drop only if present; a never-started service may lack it.
            state.pop(name, None)
        return state
class _AbstractServer(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}
    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}
    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.
    @ivar _port: instance of port set when the service is started.
    @type _port: a provider of L{twisted.internet.interfaces.IListeningPort}.
    """

    volatile = ['_port']
    method = None
    reactor = None
    _port = None

    def __init__(self, *args, **kwargs):
        # Positional/keyword args are stored verbatim and forwarded to the
        # reactor's listen<method>() call; 'reactor' is extracted so a
        # non-global reactor can be injected (mainly for tests).
        self.args = args
        if 'reactor' in kwargs:
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs

    def privilegedStartService(self):
        # Bind the port while the process may still hold elevated
        # privileges (e.g. for ports < 1024).
        service.Service.privilegedStartService(self)
        self._port = self._getPort()

    def startService(self):
        service.Service.startService(self)
        # Only listen here if privilegedStartService did not already do so.
        if self._port is None:
            self._port = self._getPort()

    def stopService(self):
        service.Service.stopService(self)
        # TODO: if startup failed, should shutdown skip stopListening?
        # _port won't exist
        if self._port is not None:
            # stopListening may complete asynchronously; return its Deferred
            # so shutdown waits for the port to actually close.
            d = self._port.stopListening()
            del self._port
            return d

    def _getPort(self):
        """
        Wrapper around the appropriate listen method of the reactor.

        @return: the port object returned by the listen method.
        @rtype: an object providing
            L{twisted.internet.interfaces.IListeningPort}.
        """
        return getattr(_maybeGlobalReactor(self.reactor),
                       'listen%s' % (self.method,))(*self.args, **self.kwargs)
class _AbstractClient(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}
    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}
    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.
    @ivar _connection: instance of connection set when the service is started.
    @type _connection: a provider of L{twisted.internet.interfaces.IConnector}.
    """

    volatile = ['_connection']
    method = None
    reactor = None
    _connection = None

    def __init__(self, *args, **kwargs):
        # Args are stored verbatim and forwarded to connect<method>();
        # 'reactor' may be injected to avoid the global reactor.
        self.args = args
        if 'reactor' in kwargs:
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs

    def startService(self):
        service.Service.startService(self)
        self._connection = self._getConnection()

    def stopService(self):
        service.Service.stopService(self)
        # Disconnect (or abort an in-progress attempt) when stopping.
        if self._connection is not None:
            self._connection.disconnect()
            del self._connection

    def _getConnection(self):
        """
        Wrapper around the appropriate connect method of the reactor.

        @return: the port object returned by the connect method.
        @rtype: an object providing L{twisted.internet.interfaces.IConnector}.
        """
        return getattr(_maybeGlobalReactor(self.reactor),
                       'connect%s' % (self.method,))(*self.args, **self.kwargs)
# Docstring templates for the dynamically generated client/server service
# classes below; '%(tran)s' is substituted with the transport name.
_doc={
    'Client':
        """Connect to %(tran)s

        Call reactor.connect%(tran)s when the service starts, with the
        arguments given to the constructor.
        """,
    'Server':
        """Serve %(tran)s clients

        Call reactor.listen%(tran)s when the service starts, with the
        arguments given to the constructor. When the service stops,
        stop listening. See twisted.internet.interfaces for documentation
        on arguments to the reactor method.
        """,
}

import types

# Generate TCPServer/TCPClient, UNIXServer/UNIXClient, etc. from the
# abstract base classes above.  MulticastClient is skipped — there is no
# connect API for multicast.  (types.ClassType is Python 2 only.)
for tran in 'TCP UNIX SSL UDP UNIXDatagram Multicast'.split():
    for side in 'Server Client'.split():
        if tran == "Multicast" and side == "Client":
            continue
        base = globals()['_Abstract'+side]
        doc = _doc[side] % vars()
        klass = types.ClassType(tran+side, (base,),
                                {'method': tran, '__doc__': doc})
        globals()[tran+side] = klass

deprecatedModuleAttribute(
    Version("Twisted", 13, 1, 0),
    "It relies upon IReactorUDP.connectUDP "
    "which was removed in Twisted 10. "
    "Use twisted.application.internet.UDPServer instead.",
    "twisted.application.internet", "UDPClient")
class TimerService(_VolatileDataService):
    """
    Service to periodically call a function

    Every C{step} seconds call the given function with the given arguments.
    The service starts the calls when it starts, and cancels them
    when it stops.

    @ivar clock: Source of time. This defaults to L{None} which is
        causes L{twisted.internet.reactor} to be used.
        Feel free to set this to something else, but it probably ought to be
        set *before* calling L{startService}.
    @type clock: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}

    @ivar call: Function and arguments to call periodically.
    @type call: L{tuple} of C{(callable, args, kwargs)}
    """

    # Both the LoopingCall and its completion Deferred are rebuilt in
    # startService, so neither survives pickling.  Bug fix: this list
    # previously contained the misspelling '_loopFinshed', which never
    # matched the '_loopFinished' attribute, so the Deferred was pickled
    # despite the intent of _VolatileDataService.
    volatile = ['_loop', '_loopFinished']

    def __init__(self, step, callable, *args, **kwargs):
        """
        @param step: The number of seconds between calls.
        @type step: L{float}

        @param callable: Function to call
        @type callable: L{callable}

        @param args: Positional arguments to pass to function
        @param kwargs: Keyword arguments to pass to function
        """
        self.step = step
        self.call = (callable, args, kwargs)
        self.clock = None

    def startService(self):
        service.Service.startService(self)
        callable, args, kwargs = self.call
        # we have to make a new LoopingCall each time we're started, because
        # an active LoopingCall remains active when serialized. If
        # LoopingCall were a _VolatileDataService, we wouldn't need to do
        # this.
        self._loop = task.LoopingCall(callable, *args, **kwargs)
        self._loop.clock = _maybeGlobalReactor(self.clock)
        self._loopFinished = self._loop.start(self.step, now=True)
        self._loopFinished.addErrback(self._failed)

    def _failed(self, why):
        # make a note that the LoopingCall is no longer looping, so we don't
        # try to shut it down a second time in stopService. I think this
        # should be in LoopingCall. -warner
        self._loop.running = False
        log.err(why)

    def stopService(self):
        """
        Stop the service.

        @rtype: L{Deferred<defer.Deferred>}
        @return: a L{Deferred<defer.Deferred>} which is fired when the
            currently running call (if any) is finished.
        """
        if self._loop.running:
            self._loop.stop()
        # Chain the parent-class state change after the loop has fully
        # finished, then hand the same Deferred back to the caller.
        self._loopFinished.addCallback(lambda _:
                service.Service.stopService(self))
        return self._loopFinished
class CooperatorService(service.Service):
    """
    Simple L{service.IService} which starts and stops a L{twisted.internet.task.Cooperator}.
    """

    def __init__(self):
        # Created stopped; startService/stopService drive its lifecycle.
        self.coop = task.Cooperator(started=False)

    def coiterate(self, iterator):
        # Delegate cooperative scheduling of the iterator to the Cooperator.
        return self.coop.coiterate(iterator)

    def startService(self):
        self.coop.start()

    def stopService(self):
        self.coop.stop()
class StreamServerEndpointService(service.Service, object):
    """
    A L{StreamServerEndpointService} is an L{IService} which runs a server on a
    listening port described by an L{IStreamServerEndpoint
    <twisted.internet.interfaces.IStreamServerEndpoint>}.

    @ivar factory: A server factory which will be used to listen on the
        endpoint.

    @ivar endpoint: An L{IStreamServerEndpoint
        <twisted.internet.interfaces.IStreamServerEndpoint>} provider
        which will be used to listen when the service starts.

    @ivar _waitingForPort: a Deferred, if C{listen} has yet been invoked on the
        endpoint, otherwise None.

    @ivar _raiseSynchronously: Defines error-handling behavior for the case
        where C{listen(...)} raises an exception before C{startService} or
        C{privilegedStartService} have completed.

    @type _raiseSynchronously: C{bool}

    @since: 10.2
    """

    _raiseSynchronously = None

    def __init__(self, endpoint, factory):
        self.endpoint = endpoint
        self.factory = factory
        self._waitingForPort = None

    def privilegedStartService(self):
        """
        Start listening on the endpoint.
        """
        service.Service.privilegedStartService(self)
        self._waitingForPort = self.endpoint.listen(self.factory)
        raisedNow = []
        def handleIt(err):
            # When _raiseSynchronously is set, a failure that occurs before
            # this method returns is re-raised to the caller; otherwise
            # failures (except cancellation) are only logged.
            if self._raiseSynchronously:
                raisedNow.append(err)
            elif not err.check(CancelledError):
                log.err(err)
        self._waitingForPort.addErrback(handleIt)
        if raisedNow:
            raisedNow[0].raiseException()

    def startService(self):
        """
        Start listening on the endpoint, unless L{privilegedStartService} got
        around to it already.
        """
        service.Service.startService(self)
        if self._waitingForPort is None:
            self.privilegedStartService()

    def stopService(self):
        """
        Stop listening on the port if it is already listening, otherwise,
        cancel the attempt to listen.

        @return: a L{Deferred<twisted.internet.defer.Deferred>} which fires
            with C{None} when the port has stopped listening.
        """
        # Cancelling is a no-op if listen() already succeeded; otherwise it
        # aborts the in-progress attempt.
        self._waitingForPort.cancel()
        def stopIt(port):
            if port is not None:
                return port.stopListening()
        d = self._waitingForPort.addCallback(stopIt)
        def stop(passthrough):
            # Mark the service stopped whether or not stopListening failed.
            self.running = False
            return passthrough
        d.addBoth(stop)
        return d
__all__ = (['TimerService', 'CooperatorService', 'MulticastServer',
'StreamServerEndpointService'] +
[tran+side
for tran in 'TCP UNIX SSL UDP UNIXDatagram'.split()
for side in 'Server Client'.split()])
| agpl-3.0 |
mohnish/planout | planout-editor/planout-editor-kernel.py | 9 | 2393 | from flask import Flask, jsonify, render_template, request, url_for
app = Flask(__name__)
from planout.interpreter import Interpreter
import traceback
import json
import sys
def testPlanOutScript(script, inputs=None, overrides=None, assertions=None):
    """Run a compiled PlanOut script and optionally validate its output.

    :param script: compiled PlanOut experiment definition (dict).
    :param inputs: input variables for the experiment; defaults to {}.
    :param overrides: optional parameter overrides applied before running.
    :param assertions: optional mapping of parameter name -> expected value.
    :returns: dict with a 'results' key on success and/or an 'errors' list.
    """
    # Bug fix: the default used to be the shared mutable ``inputs={}``;
    # use None as the default and create a fresh dict per call instead.
    if inputs is None:
        inputs = {}
    payload = {}

    # make sure experiment runs with the given inputs
    i = Interpreter(script, 'demo_salt', inputs)
    if overrides:
        i.set_overrides(overrides)

    try:
        results = dict(i.get_params())  # executes experiment
    except Exception as err:
        message = "Error running experiment:\n%s" % err
        payload['errors'] = [{
            "error_code": "runtime",
            "message": message
        }]
        return payload

    payload['results'] = results

    # validate if input contains validation code
    validation_errors = []
    if assertions:
        # Python 2 file (iteritems); each assertion either names a missing
        # parameter or a parameter whose value differs from the expectation.
        for (key, value) in assertions.iteritems():
            if key not in results:
                validation_errors.append({
                    "error_code": "assertion",
                    "message": {"param": key}
                })
            else:
                if results[key] != value:
                    message = {'param': key, 'expected': value, 'got': results[key]}
                    validation_errors.append({
                        "error_code": "assertion",
                        "message": message
                    })
        if validation_errors:
            payload['errors'] = validation_errors

    return payload
@app.route('/run_test')
def run_test():
    """Run a compiled PlanOut script supplied via query string; return JSON.

    Expects 'compiled_code', 'inputs', 'overrides', 'assertions' (all JSON
    strings) and an opaque 'id' echoed back in the response.
    """
    # not sure how to change everything to use POST requests
    raw_script = request.args.get('compiled_code', '')
    raw_inputs = request.args.get('inputs', '')
    raw_overrides = request.args.get('overrides', "{}")
    raw_assertions = request.args.get('assertions', "{}")
    # NOTE(review): 'id' shadows the builtin; harmless here but worth renaming.
    id = request.args.get('id')
    script = json.loads(raw_script) if raw_script else {}
    try:
        inputs = json.loads(raw_inputs)
        overrides = json.loads(raw_overrides) if raw_overrides else None
        assertions = json.loads(raw_assertions) if raw_assertions else None
    except:
        # NOTE(review): bare except also swallows non-JSON errors; confirm
        # narrowing to ValueError is safe for this endpoint.
        return jsonify({
            'errors': [{
                'error_code': "INVALID_FORM",
                'message': 'Invalid form input'
            }],
            'id': id
        })
    t = testPlanOutScript(script, inputs, overrides, assertions)
    t['id'] = id
    return jsonify(t)
@app.route('/')
def index():
    """Serve the single-page editor UI."""
    return render_template('index.html')
if __name__ == '__main__':
    # Development entry point; blocks serving the editor until interrupted.
    app.run(debug=True)

# NOTE(review): called at module scope, outside any request/application
# context — Flask's url_for normally raises here; confirm whether this
# line is dead code or should move into a template/request handler.
url_for('static', filename='planoutstyle.css')
zeeman/cyder | cyder/management/commands/lib/dhcpd_compare2/dhcp_objects.py | 2 | 6248 | from functools import total_ordering
from ipaddr import IPAddress
from itertools import ifilter
def is_rangestmt(x):
    """Return True when *x* is a RangeStmt instance."""
    return isinstance(x, RangeStmt)
def join_p(xs, indent=1, prefix=''):
    """Stringify and concatenate *xs*, prefixing every resulting line.

    Each line of the combined text gets ``prefix`` plus ``indent`` spaces
    prepended and a trailing newline.  An empty *xs* yields ''.
    """
    if not xs:
        return ''
    text = ''.join(str(item) for item in xs)
    lead = prefix + ' ' * indent
    return ''.join('%s%s\n' % (lead, line) for line in text.splitlines())
@total_ordering
class DHCPMixin(object):
    """Shared ordering/rendering behavior for DHCP config objects.

    Subclasses set a numeric ``TYPE`` (orders unlike objects) and compute a
    ``_sort_key`` via ``set_sort_key`` (orders like objects).  Rendering via
    __str__ expects ``firstline`` and ``contents`` on the instance;
    ``comment`` and ``related`` are optional.
    """
    # Prefix prepended to every rendered line (e.g. a diff-style marker).
    side = ''

    def __ne__(self, other):
        # Derived from subclass __eq__; needed explicitly on Python 2.
        return not self == other

    def __lt__(self, other):
        # Order by TYPE first, then by the subclass-specific sort key.
        # total_ordering fills in the remaining comparison operators.
        return self.TYPE < other.TYPE or (self.TYPE == other.TYPE and
            self._sort_key < other._sort_key)

    def __str__(self):
        s = ''
        if hasattr(self, 'contents') and self.contents:
            # Python 2: map() runs eagerly, used purely for the side effect
            # of preparing each child's _sort_key before sorted() below.
            map(lambda x: x.set_sort_key(), self.contents)
        if hasattr(self, 'comment') and self.comment:
            comment = ' # ' + self.comment
        else:
            comment = ''
        s += self.side + self.firstline + ' {' + comment + '\n'
        s += join_p(sorted(self.contents), prefix=self.side)
        s += self.side + '}\n'
        if hasattr(self, 'related') and self.related:
            map(lambda x: x.set_sort_key(), self.related)
            s += join_p(sorted(self.related), indent=0)
            # they print their own side
        return s
class Statement(DHCPMixin):
    """A single ``name value;`` DHCP statement."""
    TYPE = 1

    def __init__(self, statement):
        self.statement = statement

    def set_sort_key(self):
        # Plain statements order lexically by their full text.
        self._sort_key = self.statement

    def __eq__(self, other):
        if not isinstance(other, Statement):
            return False
        return self.statement == other.statement

    def __hash__(self):
        return hash(self.statement)

    def __str__(self):
        return '%s%s;\n' % (self.side, self.statement)
class RangeStmt(Statement):
    """A ``range <start> <end>;`` statement; sorts before other statements."""
    TYPE = 0

    def __init__(self, start, end):
        self.start = start
        self.end = end
        self.statement = 'range {0} {1}'.format(start, end)

    def set_sort_key(self):
        # Order numerically by the IP addresses, not by statement text.
        self._sort_key = (int(IPAddress(self.start)),
                          int(IPAddress(self.end)))

    def __eq__(self, other):
        if not isinstance(other, RangeStmt):
            return False
        return (self.start, self.end) == (other.start, other.end)
class Pool(DHCPMixin):
    """A ``pool { ... }`` block, identified by its address range."""
    TYPE = 2

    def __init__(self, contents=None):
        self.contents = set(contents or [])
        self.firstline = 'pool'
        # A pool must contain a range statement; it supplies the identity.
        range_stmt = next(ifilter(is_rangestmt, contents))
        self.start, self.end = range_stmt.start, range_stmt.end

    def set_sort_key(self):
        # Order pools numerically by their address range.
        self._sort_key = (int(IPAddress(self.start)),
                          int(IPAddress(self.end)))

    def __eq__(self, other):
        if not isinstance(other, Pool):
            return False
        return (self.start, self.end) == (other.start, other.end)

    def __hash__(self):
        return hash(self.start + self.end)
class Subnet(DHCPMixin):
    """A ``subnet <addr> netmask <mask> { ... }`` block."""
    TYPE = 3

    def __init__(self, netaddr, netmask, contents=None):
        self.netaddr = netaddr
        self.netmask = netmask
        self.contents = set(contents or [])
        self.firstline = 'subnet {0} netmask {1}'.format(netaddr, netmask)

    def set_sort_key(self):
        # Numeric ordering by network address, then by mask.
        self._sort_key = (int(IPAddress(self.netaddr)),
                          int(IPAddress(self.netmask)))

    def __eq__(self, other):
        if not isinstance(other, Subnet):
            return False
        return (self.netaddr, self.netmask) == (other.netaddr, other.netmask)

    def __hash__(self):
        return hash(self.netaddr + self.netmask)
class Subclass(DHCPMixin):
    """A ``subclass "class" match`` entry, optionally with a body."""
    TYPE = 4

    def __init__(self, classname, match, contents=None):
        self.classname = classname
        self.match = match
        self.contents = set(contents or [])
        self.firstline = 'subclass "{0}" {1}'.format(self.classname,
                                                     self.match)

    def set_sort_key(self):
        self._sort_key = self.classname + self.match

    def __eq__(self, other):
        if not isinstance(other, Subclass):
            return False
        return (self.classname, self.match) == (other.classname, other.match)

    def __hash__(self):
        return hash(self.classname + self.match)

    def __str__(self):
        # A body-less subclass renders as a one-line statement instead of
        # an empty block.
        if not self.contents:
            return '%s%s;\n' % (self.side, self.firstline)
        return super(Subclass, self).__str__()
class Class(DHCPMixin):
    """A ``class "name" { ... }`` block; its subclasses live in ``related``."""
    TYPE = 5

    def __init__(self, name, contents=None, related=None):
        self.name = name
        self.contents = set(contents or [])
        self.related = set(related or [])
        self.firstline = 'class "{0}"'.format(name)

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        return isinstance(other, Class) and self.name == other.name

    def __hash__(self):
        return hash(self.name)

    def add_subclass(self, match, contents):
        """Attach a Subclass of this class matching *match*."""
        self.related.add(Subclass(self.name, match, contents))
class Group(DHCPMixin):
    """A ``group { ... }`` block; the name only appears as a comment."""
    TYPE = 6

    def __init__(self, name, contents=None):
        self.name = name
        self.contents = set(contents or [])
        self.firstline = 'group'
        # Groups are anonymous in dhcpd syntax, so the name is emitted as
        # a trailing comment on the opening line.
        self.comment = self.name

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        return isinstance(other, Group) and self.name == other.name

    def __hash__(self):
        return hash(self.name)
class Host(DHCPMixin):
    """A ``host <name> { ... }`` block."""
    TYPE = 7

    def __init__(self, name, contents=None):
        self.name = name
        self.contents = set(contents or [])
        self.firstline = 'host ' + self.name

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        return isinstance(other, Host) and self.name == other.name

    def __hash__(self):
        return hash(self.name)
class ConfigFile(DHCPMixin):
    """Top-level container for parsed DHCP config objects.

    NOTE(review): inherits DHCPMixin.__str__ but defines no ``firstline``
    or ``contents``, so stringifying a ConfigFile directly would raise
    AttributeError — confirm it is never rendered via str().
    """

    def __init__(self, related=None):
        self.related = set(related or [])

    def add(self, obj):
        """Add *obj* to the file's top-level objects; falsy values are ignored."""
        if obj:
            self.related.add(obj)

    def get_class(self, name):
        """Return the first Class named *name* (StopIteration when absent)."""
        matches = ifilter(
            lambda obj: isinstance(obj, Class) and obj.name == name,
            self.related)
        return next(matches)
| bsd-3-clause |
UECIDE/UECIDE_data | compilers/arm-eabi-gcc/linux/arm-eabi-gcc/arm-none-eabi/lib/thumb/armv7e-m/libstdc++.a-gdb.py | 1 | 2382 | # -*- python -*-
# Copyright (C) 2009, 2010 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home2/kimballr/yagarto/tools/lm4f/share/gcc-4.7.1/python'
libdir = '/home2/kimballr/yagarto/tools/lm4f/arm-none-eabi/lib/thumb/armv7e-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE(review): compares against '/' rather than os.sep; confirm this
    # auto-generated script only ever sees POSIX-style paths.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)

# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| bsd-3-clause |
mosaic-cloud/mosaic-distribution-dependencies | dependencies/nodejs/0.8.22/deps/npm/node_modules/node-gyp/gyp/test/small/gyptest-small.py | 89 | 1405 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Runs small tests.
"""
import imp
import os
import sys
import unittest
import TestGyp
test = TestGyp.TestGyp()

# Add pylib to the import path (so tests can import their dependencies).
# This is consistent with the path.append done in the top file "gyp".
sys.path.append(os.path.join(test._cwd, 'pylib'))

# Add new test suites here.
files_to_test = [
  'pylib/gyp/MSVSSettings_test.py',
  'pylib/gyp/easy_xml_test.py',
  'pylib/gyp/generator/msvs_test.py',
  'pylib/gyp/generator/ninja_test.py',
  'pylib/gyp/common_test.py',
]

# Collect all the suites from the above files.
suites = []
for filename in files_to_test:
  # Carve the module name out of the path.
  name = os.path.splitext(os.path.split(filename)[1])[0]
  # Find the complete module path.
  full_filename = os.path.join(test._cwd, filename)
  # Load the module.
  module = imp.load_source(name, full_filename)
  # Add it to the list of test suites.
  suites.append(unittest.defaultTestLoader.loadTestsFromModule(module))

# Create combined suite.
all_tests = unittest.TestSuite(suites)

# Run all the tests; report failures through the gyp test harness so the
# process exit status reflects the unittest result.
result = unittest.TextTestRunner(verbosity=2).run(all_tests)
if result.failures or result.errors:
  test.fail_test()

test.pass_test()
| apache-2.0 |
htygithub/bokeh | bokeh/model.py | 1 | 13736 | from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__file__)
from json import loads
from six import iteritems
from .properties import Any, HasProps, List, MetaHasProps, String
from .query import find
from .util.callback_manager import CallbackManager
from .util.future import with_metaclass
from .util.serialization import make_id
from ._json_encoder import serialize_json
from .themes import default as default_theme
class Viewable(MetaHasProps):
    """ Any plot object (Data Model) which has its own View Model in the
    persistence layer.

    One thing to keep in mind is that a Viewable should have a single
    unique representation in the persistence layer, but it might have
    multiple concurrent client-side Views looking at it.  Those may
    be from different machines altogether.

    """

    # Stores a mapping from subclass __view_model__ names to classes
    model_class_reverse_map = {}

    # Mmmm.. metaclass inheritance.  On the one hand, it seems a little
    # overkill. On the other hand, this is exactly the sort of thing
    # it's meant for.
    def __new__(meta_cls, class_name, bases, class_dict):
        # Every subclass gets a __view_model__ (defaulting to its own
        # class name) and the shared get_class lookup helper.
        if "__view_model__" not in class_dict:
            class_dict["__view_model__"] = class_name
        class_dict["get_class"] = Viewable.get_class

        # Create the new class
        newcls = super(Viewable, meta_cls).__new__(meta_cls, class_name, bases, class_dict)
        entry = class_dict.get("__subtype__", class_dict["__view_model__"])
        # Add it to the reverse map, but check for duplicates first
        # NOTE(review): raising Warning (rather than warnings.warn) makes a
        # duplicate registration fatal — confirm this is intended.
        if entry in Viewable.model_class_reverse_map and not hasattr(newcls, "__implementation__"):
            raise Warning("Duplicate __view_model__ or __subtype__ declaration of '%s' for " \
                          "class %s.  Previous definition: %s" % \
                          (entry, class_name,
                           Viewable.model_class_reverse_map[entry]))
        Viewable.model_class_reverse_map[entry] = newcls
        return newcls

    @classmethod
    def _preload_models(cls):
        # Import for side effect only: importing these modules runs this
        # metaclass for every model class, populating the reverse map.
        from . import models; models
        from .crossfilter import models as crossfilter_models; crossfilter_models
        from .charts import Chart; Chart

    @classmethod
    def get_class(cls, view_model_name):
        """ Given a __view_model__ name, returns the corresponding class
        object
        """
        cls._preload_models()
        d = Viewable.model_class_reverse_map
        if view_model_name in d:
            return d[view_model_name]
        else:
            raise KeyError("View model name '%s' not found" % view_model_name)
class Model(with_metaclass(Viewable, HasProps, CallbackManager)):
""" Base class for all plot-related objects """
name = String()
tags = List(Any)
    def __init__(self, **kwargs):
        # Pop 'id' before HasProps sees the kwargs; generate one if absent.
        self._id = kwargs.pop("id", make_id())
        self._document = None
        super(Model, self).__init__(**kwargs)
        # Newly created models start with the default theme applied.
        default_theme.apply_to_model(self)
    def _attach_document(self, doc):
        '''This should only be called by the Document implementation to set the document field'''
        # A model may belong to at most one Document at a time.
        if self._document is not None and self._document is not doc:
            raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))
        self._document = doc
        # Re-theme with the owning document's theme.
        doc.theme.apply_to_model(self)
    def _detach_document(self):
        '''This should only be called by the Document implementation to unset the document field'''
        self._document = None
        # Revert to the default theme once no document owns this model.
        default_theme.apply_to_model(self)
    @property
    def document(self):
        # The owning Document, or None while unattached.
        return self._document
    def trigger(self, attr, old, new):
        # If either the old or the new value contains references to other
        # models, the document's cached model set is stale; count such
        # references and invalidate the cache when any are found.
        dirty = { 'count' : 0 }
        def mark_dirty(obj):
            dirty['count'] += 1
        if self._document is not None:
            self._visit_value_and_its_immediate_references(new, mark_dirty)
            self._visit_value_and_its_immediate_references(old, mark_dirty)
            if dirty['count'] > 0:
                self._document._invalidate_all_models()
        # chain up to invoke callbacks
        super(Model, self).trigger(attr, old, new)
@property
def ref(self):
if "__subtype__" in self.__class__.__dict__:
return {
'type': self.__view_model__,
'subtype': self.__subtype__,
'id': self._id,
}
else:
return {
'type': self.__view_model__,
'id': self._id,
}
    def select(self, selector):
        ''' Query this object and all of its references for objects that
        match the given selector.

        Args:
            selector (JSON-like) :

        Returns:
            seq[Model]

        '''
        # references() includes this model itself, so self may match too.
        return find(self.references(), selector)
def select_one(self, selector):
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
Model
'''
result = list(self.select(selector))
if len(result) > 1:
raise ValueError("Found more than one object matching %s: %r" % (selector, result))
if len(result) == 0:
return None
return result[0]
def set_select(self, selector, updates):
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
def layout(self, side, plot):
try:
return self in getattr(plot, side)
except:
return []
    @classmethod
    def _visit_immediate_value_references(cls, value, visitor):
        ''' Visit all references to another Model without recursing into any
        of the child Model; may visit the same Model more than once if
        it's referenced more than once.  Does not visit the passed-in value.

        '''
        if isinstance(value, HasProps):
            # Only properties declared as holding references can point at
            # other models; walk just those.
            for attr in value.properties_with_refs():
                child = getattr(value, attr)
                cls._visit_value_and_its_immediate_references(child, visitor)
        else:
            # Plain containers: descend to find any models inside.
            cls._visit_value_and_its_immediate_references(value, visitor)
    @classmethod
    def _visit_value_and_its_immediate_references(cls, obj, visitor):
        # Dispatch on the value's kind: Models are visited directly (no
        # recursion into them); non-Model HasProps and containers are
        # searched for models one level down.
        if isinstance(obj, Model):
            visitor(obj)
        elif isinstance(obj, HasProps):
            # this isn't a Model, so recurse into it
            cls._visit_immediate_value_references(obj, visitor)
        elif isinstance(obj, (list, tuple)):
            for item in obj:
                cls._visit_value_and_its_immediate_references(item, visitor)
        elif isinstance(obj, dict):
            # Both keys and values may reference models.
            for key, value in iteritems(obj):
                cls._visit_value_and_its_immediate_references(key, visitor)
                cls._visit_value_and_its_immediate_references(value, visitor)
    @classmethod
    def collect_models(cls, *input_values):
        """ Iterate over ``input_values`` and descend through their structure
        collecting all nested ``Models`` on the go. The resulting list
        is duplicate-free based on objects' identifiers.
        """
        ids = set([])
        collected = []
        queued = []

        def queue_one(obj):
            # Enqueue each model at most once, keyed by its unique _id.
            if obj._id not in ids:
                queued.append(obj)

        for value in input_values:
            cls._visit_value_and_its_immediate_references(value, queue_one)

        # Breadth-first traversal of the reference graph.
        while queued:
            obj = queued.pop(0)
            if obj._id not in ids:
                ids.add(obj._id)
                collected.append(obj)
                cls._visit_immediate_value_references(obj, queue_one)

        return collected
    def references(self):
        """Returns all ``Models`` that this object has references to,
        including this object itself. """
        return set(self.collect_models(self))
    def _to_json_like(self, include_defaults):
        """ Returns a dictionary of the attributes of this object, in
        a layout corresponding to what BokehJS expects at unmarshalling time.
        This method does not convert "Bokeh types" into "plain JSON types,"
        for example each child Model will still be a Model, rather
        than turning into a reference, numpy isn't handled, etc.
        That's what "json like" means.
        This method should be considered "private" or "protected",
        for use internal to Bokeh; use to_json() instead because
        it gives you only plain JSON-compatible types.
        Args:
            include_defaults (bool) : whether to include attributes
                that haven't been changed from the default.
        """
        attrs = self.properties_with_values(include_defaults=include_defaults)
        for (k, v) in attrs.items():
            # we can't serialize Infinity, we send it as None and
            # the other side has to fix it up. This transformation
            # can't be in our _json_encoder because the json
            # module checks for inf before it calls the custom
            # encoder.
            # NOTE(review): only +inf is mapped; -inf and NaN are left
            # as-is and would still break serialization — confirm intended.
            if isinstance(v, float) and v == float('inf'):
                attrs[k] = None
        return attrs
def to_json(self, include_defaults):
""" Returns a dictionary of the attributes of this object,
containing only "JSON types" (string, number, boolean,
none, dict, list).
References to other objects are serialized as "refs" (just
the object ID and type info), so the deserializer will
need to separately have the full attributes of those
other objects.
There's no corresponding from_json() because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
"""
return loads(self.to_json_string(include_defaults=include_defaults))
    def to_json_string(self, include_defaults):
        """Returns a JSON string encoding the attributes of this object.

        References to other objects are serialized as references
        (just the object ID and type info), so the deserializer
        will need to separately have the full attributes of those
        other objects.

        There's no corresponding from_json_string() because to
        deserialize an object is normally done in the context of a
        Document (since the Document can resolve references).

        For most purposes it's best to serialize and deserialize
        entire documents.

        Args:
            include_defaults (bool) : whether to include attributes
                that haven't been changed from the default
        """
        json_like = self._to_json_like(include_defaults=include_defaults)
        # The id is not a regular property and must be injected by hand.
        json_like['id'] = self._id
        # we sort_keys to simplify the test suite by making the returned
        # string deterministic. serialize_json "fixes" the JSON from
        # _to_json_like by converting all types into plain JSON types
        # (it converts Model into refs, for example).
        return serialize_json(json_like, sort_keys=True)
def update(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return "%s, ViewModel:%s, ref _id: %s" % (self.__class__.__name__,
self.__view_model__, getattr(self, "_id", None))
def _find_some_document(models):
    """Return a Document associated with any of *models* (each a Model or
    Document), searching direct attachment first and then each model's
    references; returns None if no document is found.
    """
    from .document import Document
    # First try the easy stuff...
    doc = None
    for model in models:
        if isinstance(model, Document):
            doc = model
            break
        elif isinstance(model, Model):
            if model.document is not None:
                doc = model.document
                break
    # Now look in children of models
    if doc is None:
        for model in models:
            if isinstance(model, Model):
                # see if some child of ours is in a doc, this is meant to
                # handle a thing like:
                #   p = figure()
                #   box = HBox(children=[p])
                #   show(box)
                for r in model.references():
                    if r.document is not None:
                        doc = r.document
                        break
    return doc
class _ModelInDocument(object):
    """Context manager that temporarily attaches document-less models to a
    Document (an existing one if discoverable, otherwise a fresh one) and
    detaches them again on exit.
    """
    # 'models' can be a single Model, a single Document, or a list of either
    def __init__(self, models):
        from .document import Document
        # Models that had no document and therefore must be detached on exit.
        self._to_remove_after = []
        if not isinstance(models, list):
            models = [models]
        self._doc = _find_some_document(models)
        if self._doc is None:
            # oh well - just make up a doc
            self._doc = Document()
        for model in models:
            if isinstance(model, Model):
                if model.document is None:
                    self._to_remove_after.append(model)
    def __exit__(self, type, value, traceback):
        # Undo only the attachments we made in __enter__.
        for model in self._to_remove_after:
            model.document.remove_root(model)
    def __enter__(self):
        for model in self._to_remove_after:
            self._doc.add_root(model)
| bsd-3-clause |
cnvogelg/fs-uae-gles | launcher/fs_uae_launcher/server/game.py | 4 | 25322 | """
FS-UAE Netplay Game Server
Copyright (C) 2012 Frode Solheim
This library is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at
your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from __future__ import print_function
import sys
import time
from collections import deque
import socket
import traceback
import threading
import random
from hashlib import sha1
# Protocol version byte exchanged in the FSNP handshake.
SERVER_PROTOCOL_VERSION = 1
# Hard upper bound on simultaneous players (size of the session-key table).
MAX_PLAYERS = 6

# sys.version is a free-form build string ("3.10.4 (main, ...)"); comparing
# it lexically against '3' happens to work but sys.version_info is the
# documented, robust way to detect the interpreter's major version.
PYTHON3 = sys.version_info[0] >= 3
# Byte/int conversion helpers with Python 2 and Python 3 variants:
# messages on the wire are 32-bit big-endian words, and the two Python
# major versions differ in how indexing bytes behaves.
if PYTHON3:
    def int_to_bytes(number):
        # Pack a 32-bit integer big-endian into a 4-byte bytes object.
        return bytes([(number & 0xff000000) >> 24, (number & 0x00ff0000) >> 16,
                (number & 0x0000ff00) >> 8, (number & 0x000000ff)])
    def bytes_to_int(m):
        # Unpack 4 big-endian bytes into an integer.
        return m[0] << 24 | m[1] << 16 | m[2] << 8 | m[3]
    def byte_ord(v):
        #print("byte_ord", v)
        # Accept either a length-1 bytes object or an int (py3 iteration
        # over bytes yields ints, hence the TypeError fallback).
        try:
            return v[0]
        except TypeError:
            return v
    def byte(v):
        # Single integer value -> length-1 bytes object.
        return bytes([v])
    server_protocol_version = byte(SERVER_PROTOCOL_VERSION)
else:
    def int_to_bytes(number):
        # Python 2: bytes are str, so build the word from chr() pieces.
        return chr((number & 0xff000000) >> 24) + \
            chr((number & 0x00ff0000) >> 16) + \
            chr((number & 0x0000ff00) >> 8) + \
            chr((number & 0x000000ff))
    def bytes_to_int(m):
        return ord(m[0]) << 24 | ord(m[1]) << 16 | ord(m[2]) << 8 | ord(m[3])
    def byte_ord(v):
        return ord(v)
    def byte(v):
        return v
    server_protocol_version = chr(SERVER_PROTOCOL_VERSION)
# Max number of frames any client may lag behind before the loop throttles.
max_drift = 25
# Number of players required before the game starts (overridable via CLI).
num_clients = 2
# TCP listen port / bind address.
port = 25100
host = "0.0.0.0"
# The single Game instance, created by _run_server().
game = None
# Numeric password derived from --password (0 means "no password").
game_password = 0
# Seconds to wait for all players before aborting (0 disables the timeout).
launch_timeout = 0
def create_game_password(value):
    """Derive the 32-bit numeric game password from a password string.

    Only ASCII bytes (< 128) of the UTF-8 encoding participate; the value
    is the first 4 bytes of SHA-1 over b"FSNP" + those bytes, big-endian.
    Must stay in sync with the client-side derivation.
    """
    # for python 2 + 3 compatibility
    #if not isinstance(value, unicode):
    value = value.encode("UTF-8")
    #print(repr(value))
    h = sha1()
    h.update(b"FSNP")
    val = b""
    for v in value:
        if byte_ord(v) < 128:
            val += byte(v)
    #print("update:", repr(val))
    h.update(val)
    return bytes_to_int(h.digest()[:4])
# Minimal command-line parsing: recognizes --port=N, --players=N,
# --password=S and --launch-timeout=N; anything else is silently ignored.
for arg in sys.argv:
    if arg.startswith("--"):
        parts = arg[2:].split("=", 1)
        if len(parts) == 2:
            key, value = parts
            key = key.lower()
            if key == "port":
                port = int(value)
            elif key == "players":
                num_clients = int(value)
            elif key == "password":
                #game_password = crc32(value) & 0xffffffff
                game_password = create_game_password(value)
                #print("game password (numeric) is", game_password)
            elif key == "launch-timeout":
                launch_timeout = int(value)
# Extension-message command numbers (the high byte of an 0x80000000-tagged
# message word; see create_ext_message / Client.on_message).
MESSAGE_READY = 0
MESSAGE_MEM_CHECK = 5
MESSAGE_RND_CHECK = 6
MESSAGE_PING = 7
MESSAGE_PLAYERS = 8
MESSAGE_PLAYER_TAG_0 = 9
MESSAGE_PLAYER_TAG_1 = 10
MESSAGE_PLAYER_TAG_2 = 11
MESSAGE_PLAYER_TAG_3 = 12
MESSAGE_PLAYER_TAG_4 = 13
MESSAGE_PLAYER_TAG_5 = 14
MESSAGE_PLAYER_PING = 15
MESSAGE_PLAYER_LAG = 16
MESSAGE_SET_PLAYER_TAG = 17
MESSAGE_PROTOCOL_VERSION = 18
MESSAGE_EMULATION_VERSION = 19
MESSAGE_ERROR = 20
MESSAGE_TEXT = 21
MESSAGE_SESSION_KEY = 22
#MESSAGE_MEM_CHECK = 5
#MESSAGE_RND_CHECK = 6
#MESSAGE_PING = 7
# Pre-built full message words for the two desync-check commands.
MESSAGE_MEMCHECK_MASK = (0x80000000 | (MESSAGE_MEM_CHECK << 24))
MESSAGE_RNDCHECK_MASK = (0x80000000 | (MESSAGE_RND_CHECK << 24))
# Error codes carried in MESSAGE_ERROR payloads.
ERROR_PROTOCOL_MISMATCH = 1
ERROR_WRONG_PASSWORD = 2
ERROR_CANNOT_RESUME = 3
ERROR_GAME_ALREADY_STARTED = 4
ERROR_PLAYER_NUMBER = 5
ERROR_EMULATOR_MISMATCH = 6
ERROR_CLIENT_ERROR = 7
ERROR_MEMORY_DESYNC = 8
ERROR_RANDOM_DESYNC = 9
ERROR_SESSION_KEY = 10
ERROR_GAME_STOPPED = 99
def create_ext_message(ext, data):
    """Pack an extension command number and a 24-bit payload into one
    32-bit message word (top bit set marks it as an extension message)."""
    payload = data & 0xffffff
    return 0x80000000 | (ext << 24) | payload
class Client:
    """One connected netplay peer.

    Owns the TCP socket, a per-client outgoing message queue, and a reader
    thread (started from __init__) that performs the FSNP handshake and
    then pumps 4-byte message words into on_message().
    """
    def __init__(self, socket, address):
        #self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket = socket
        # Outgoing 4-byte message chunks, flushed in batches of 100.
        self.messages = []
        # Guards the socket and the message queue.
        self.lock = threading.Lock()
        self.address = address
        self.ready = 0
        # 3-byte player tag shown to other players.
        self.tag = b"PLY"
        self.player = 0
        self.playing = False
        # Last frame number acknowledged by this client.
        self.frame = 0
        self.frame_times = [0.0 for x in range(100)]
        # Seconds this client lags behind the server's frame clock.
        self.lag = 0.0
        self.out_seq = 0
        # Ring buffers of (checksum, frame) pairs for desync detection.
        self.memcheck = [(0, 0) for x in range(100)]
        self.rndcheck = [(0, 0) for x in range(100)]
        # Non-zero while a ping is in flight (timestamp of send).
        self.ping_sent_at = 0
        self.pings = deque([0 for x in range(10)])
        self.pings_sum = 0
        self.pings_avg = 0
        #self.protocol_version = 0
        self.emulator_version = b""
        self.session_key = 0
        # temp_a/b/c are debug counters for ping bookkeeping.
        self.temp_a = 0
        self.temp_b = 0
        self.temp_c = 0
        threading.Thread(target=self.__thread_function).start()
    def send_error_message(self, error_num):
        """Send an ERROR_* code to this client immediately."""
        print(self, "error", error_num)
        message = 0x80000000 | (MESSAGE_ERROR) << 24 | error_num
        self.send_message(message)
    def send_message(self, message):
        """Send a single 32-bit message word immediately (bypasses queue)."""
        with self.lock:
            #if message == 0x87000000:
            #    #print("queueing %08x" % (message,))
            #    #traceback.print_stack()
            #    self.temp_c += 1
            self.__send_data(int_to_bytes(message))
    def __send_data(self, data):
        # Raw blocking write; caller must hold self.lock.
        #if data[0] == '\x87':
        #    #print("queueing ping")
        #    #traceback.print_stack()
        #    self.temp_c += 1
        self.socket.sendall(data)
    def queue_message(self, message):
        """Queue a 32-bit message word; auto-flushes every 100 entries."""
        with self.lock:
            #if message == 0x87000000:
            #    #print("queueing %08x" % (message,))
            #    #traceback.print_stack()
            #    self.temp_c += 1
            #print("queueing %08x" % (message,))
            #if message & 0x20000000:
            #    traceback.print_stack()
            self.messages.append(int_to_bytes(message))
            if len(self.messages) == 100:
                self.__send_queued_messages()
    def queue_bytes(self, message):
        """Queue a pre-encoded bytes payload (e.g. a text message)."""
        with self.lock:
            self.messages.append(message)
            if len(self.messages) == 100:
                self.__send_queued_messages()
    def send_queued_messages(self):
        """Flush the outgoing queue now."""
        with self.lock:
            self.__send_queued_messages()
    def __send_queued_messages(self):
        # Caller must hold self.lock; coalesces the queue into one write.
        data = b"".join(self.messages)
        self.messages[:] = []
        #print("sending", repr(data))
        self.__send_data(data)
    def initialize_client(self):
        """Perform the FSNP handshake; returns True when the client is
        registered with the game, False when the connection should close.
        """
        print("initialize", self)
        def read_bytes(num):
            # Read exactly num bytes or raise.
            data = b""
            for i in range(num):
                data = data + self.socket.recv(1)
            if not len(data) == num:
                raise Exception("did not get {0} bytes".format(num))
            return data
        # check header message
        data = read_bytes(4)
        if data == b"PING":
            # connection check only
            self.__send_data(b"PONG")
            return False
        if data != b"FSNP":
            print(data)
            raise Exception("did not get expected FSNP header")
        # check protocol version
        data = self.socket.recv(1)
        #print(repr(data), repr(server_protocol_version))
        if data != server_protocol_version:
            print("protocol mismatch")
            self.send_error_message(ERROR_PROTOCOL_MISMATCH)
            return False
        # check net play password
        password = bytes_to_int(read_bytes(4))
        if password != game_password:
            print("wrong password")
            self.send_error_message(ERROR_WRONG_PASSWORD)
            return False
        # read emulator version
        self.emulator_version = read_bytes(8)
        # read player number and session key, session key is checked to
        # make sure another client cannot hijack this player slot
        self.session_key = bytes_to_int(b"\0" + read_bytes(3))
        self.player = byte_ord(self.socket.recv(1))
        self.tag = read_bytes(3)
        # get package sequence number
        self.resume_from_packet = bytes_to_int(read_bytes(4))
        error = game.add_client(self)
        if error:
            print(repr(error))
            self.send_error_message(error)
            return False
        # Tell the client its (possibly server-assigned) session key and
        # its player number / total player count.
        message = create_ext_message(MESSAGE_SESSION_KEY, self.session_key)
        self.queue_message(message)
        data = (self.player << 8) | num_clients
        message = create_ext_message(MESSAGE_PLAYERS, data)
        self.queue_message(message)
        game.send_player_tags(self)
        self.send_queued_messages()
        print("initialize done for", self)
        return True
    def __thread_function(self):
        # Reader-thread body: handshake, then pump messages until the
        # game stops or the socket dies; always closes the socket.
        try:
            try:
                if not self.initialize_client():
                    #print("initialize failed for", self)
                    return
                self.receive_loop()
            except Exception:
                traceback.print_exc()
                game.stop = True
        finally:
            try:
                self.socket.close()
            except Exception:
                pass
    def receive_loop(self):
        """Read 4-byte message words and dispatch them to on_message()."""
        data = b""
        count = 0
        while not game.stop:
            new_data = self.socket.recv(4 - count)
            if not new_data:
                # FIXME
                return
            data = data + new_data
            count = len(data)
            if count == 4:
                self.on_message(data)
                count = 0
                data = b""
    def send_ping(self):
        """Send a ping unless one is already outstanding."""
        with self.lock:
            #print("ping?", self.ping_sent_at)
            if self.ping_sent_at == 0:
                self.temp_a += 1
                self.ping_sent_at = time.time()
                message = int_to_bytes(0x80000000 | (7 << 24))
                self.__send_data(message)
                assert self.ping_sent_at > 0
    def on_ping(self):
        """Handle a ping reply: update the rolling round-trip average."""
        # may not need lock here
        with self.lock:
            self.temp_b += 1
            if self.temp_a != self.temp_b:
                print(self.temp_a, self.temp_b, self.temp_c)
            assert self.ping_sent_at > 0
            t = time.time()
            new = (t - self.ping_sent_at) / 1.0
            #print(t, "-", self.ping_sent_at, "=", new)
            old = self.pings.popleft()
            self.pings.append(new)
            #print(self.pings)
            # Maintain a running sum so the average is O(1) per update.
            self.pings_sum = self.pings_sum - old + new
            self.pings_avg = self.pings_sum / len(self.pings)
            self.ping_sent_at = 0
    def on_message(self, m):
        """Decode one 32-bit message word from this client and act on it.

        Top bit set -> extension command; bit 30 -> frame ack;
        bit 29 -> input event; anything else is unknown.
        """
        message = bytes_to_int(m)
        #with game.lock:
        #    if not self.playing:
        #        print(self, "is no longer playing/valid, ignoring message")
        if message & 0x80000000:
            # ext message
            command = (message & 0x7f000000) >> 24
            data = message & 0x00ffffff;
            if command == MESSAGE_MEM_CHECK:
                self.memcheck[self.frame % 100] = (data, self.frame)
            elif command == MESSAGE_RND_CHECK:
                self.rndcheck[self.frame % 100] = (data, self.frame)
            elif command == MESSAGE_PING:
                #print("{0:x}".format(message))
                self.on_ping()
            elif command == MESSAGE_TEXT:
                print("received text command")
                # Payload length is in ``data``; read the text inline.
                remaining = data
                text = b""
                while not game.stop:
                    part = self.socket.recv(remaining)
                    count = len(part)
                    text += part
                    remaining = remaining - count
                    if remaining == 0:
                        game.add_text_message(self, text)
                        break
        elif message & (1 << 30):
            # Frame acknowledgement from the client.
            frame = message & 0x3fffffff
            #print("received frame", frame)
            if frame != self.frame + 1:
                print("error, expected frame", self.frame + 1, "got", frame)
            #print("received frame", frame)
            self.frame = frame
            t = time.time()
            self.frame_times[self.frame % 100] = t
            # Lag = how long after the server emitted this frame the
            # client acknowledged it.
            game_t = game.frame_times[self.frame % 100]
            self.lag = t - game_t
        elif message & (1 << 29):
            game.add_input_event(self, message & 0x00ffffff)
        else:
            print("warning: unknown command received %x" % (message,))
    def __str__(self):
        return "<Client {0}:{1} {2}>".format(self.player, self.tag,
                self.address)
def create_session_key():
    """Return a random session key in the 24-bit range [0, 2**24)."""
    return random.randrange(2 ** 24)
class Game:
    """Coordinates one netplay session.

    Tracks connected clients, drives the 50 Hz frame clock from a
    dedicated thread, fans input events back out to all clients, and
    verifies memory/random checksums to detect desyncs.
    """
    def __init__(self, num_clients):
        self.started = False
        self.frame = 0
        self.time = 0
        self.clients = []
        # Fix: this used to be hard-coded to 0, discarding the ctor
        # argument (methods read the module-level ``num_clients`` global,
        # so behavior is unchanged, but the attribute is now truthful).
        self.num_clients = num_clients
        self.frame_times = [0.0 for x in range(100)]
        self.lock = threading.Lock()
        self.stop = False
        self.session_keys = [0 for x in range(MAX_PLAYERS)]
        self.emulator_version = b""
        # Highest frame whose checksums have been cross-checked.
        self.verified_frame = -1
    def __start(self):
        # Called with self.lock held once the last expected player joins.
        if len(self.clients) != num_clients:
            # BUG fix: this line used to call the undefined name
            # ``printf`` and would have raised NameError.
            print("error - cannot start until all players have connected")
            return
        print("{0} clients connected, starting game".format(num_clients))
        self.started = True
        threading.Thread(target=self.__thread_function).start()
    def add_client(self, client):
        """Register a connecting (or resuming) client.

        Returns:
            0 on success, or an ERROR_* code the caller should relay.
        """
        with self.lock:
            if client.player == 0xff:
                # 0xff means "assign me a slot" (fresh connection).
                if client.resume_from_packet != 0:
                    return ERROR_CLIENT_ERROR
                if self.started:
                    return ERROR_GAME_ALREADY_STARTED
                client.player = len(self.clients)
                if client.player == 0:
                    # First player defines the required emulator version.
                    self.emulator_version = client.emulator_version
                else:
                    if self.emulator_version != client.emulator_version:
                        return ERROR_EMULATOR_MISMATCH
                client.session_key = create_session_key()
                self.session_keys[client.player] = client.session_key
                self.clients.append(client)
                client.playing = True
                if not self.started:
                    if len(self.clients) == num_clients:
                        self.__start()
            else:
                # Reconnecting client claims an existing slot; the session
                # key prevents another client from hijacking it.
                if client.player >= len(self.clients):
                    return ERROR_PLAYER_NUMBER
                if self.session_keys[client.player] != client.session_key:
                    return ERROR_SESSION_KEY
                old_client = self.clients[client.player]
                # FIXME: must transfer settings for resuming to work
                self.clients[client.player] = client
                client.playing = True
                if client.resume_from_packet > 0:
                    # cannot resume yet...
                    # BUG fix: ``resume_from_packet`` was referenced as a
                    # bare (undefined) name here, raising NameError.
                    print("cannot resume at packet",
                          client.resume_from_packet)
                    return ERROR_CANNOT_RESUME
            return 0
        # FIXME: start the game..?
    def __thread_function(self):
        # Frame-clock thread body; any uncaught error stops the session.
        try:
            self.__game_loop()
        except Exception:
            traceback.print_exc()
            self.stop = True
    def __send_player_tags(self, send_to_client):
        # Queue one PLAYER_TAG_n message per connected client.
        # (Consistency fix: use self rather than the module-global
        # ``game``, which is the same object but needlessly indirect.)
        for i, client in enumerate(self.clients):
            data = bytes_to_int(b"\0" + client.tag)
            message = create_ext_message(MESSAGE_PLAYER_TAG_0 + i, data)
            send_to_client.queue_message(message)
    def send_player_tags(self, client):
        """Thread-safe: queue all player tags to *client*."""
        with self.lock:
            self.__send_player_tags(client)
    def __game_loop(self):
        with self.lock:
            for client in self.clients:
                self.__send_player_tags(client)
        print("game loop running")
        self.time = time.time()
        while True:
            if self.stop:
                print("stopping game loop")
                # try to send error message to all players
                with self.lock:
                    for client in self.clients:
                        try:
                            client.send_error_message(ERROR_GAME_STOPPED)
                        except Exception:
                            traceback.print_exc()
                return
            self.__game_loop_iteration()
    def __game_loop_iteration(self):
        # One 20 ms frame tick: sleep most of the interval, busy-wait the
        # final millisecond for precision, then broadcast the new frame.
        # FIXME: higher precision sleep?
        target_time = self.time + 0.02
        t2 = time.time()
        diff = target_time - t2
        sleep = diff - 0.001
        if sleep > 0.0:
            #print(sleep)
            time.sleep(sleep)
        self.time = target_time
        while time.time() < target_time:
            # busy-loop until target time
            pass
        with self.lock:
            if self.frame % 100 == 0:
                self.__send_status()
            self.frame += 1
            self.frame_times[self.frame % 100] = time.time()
            message = (1 << 30) | self.frame
            self.__send_to_clients(message, True)
            if self.frame % 10 == 0:
                for client in self.clients:
                    client.send_ping()
            if self.frame % 200 == 0:
                self.__print_status()
        self.__check_game()
    def __check_game(self):
        # Verify checksums for every newly-acked frame, then throttle the
        # frame clock if any client has drifted more than max_drift behind.
        oldest_frame, frames = self.__check_frame_status()
        while oldest_frame > self.verified_frame:
            self.check_synchronization(self.verified_frame + 1)
            self.verified_frame += 1
        diff = self.frame - oldest_frame
        if diff <= max_drift:
            # clients have not drifted too far
            return
        first = True
        count = 0
        while diff > 0 and not self.stop:
            if first:
                first = False
                print("---", self.frame, "acked", frames)
            elif count % 100 == 0:
                print("   ", self.frame, "acked", frames)
            time.sleep(0.02)
            oldest_frame, frames = self.__check_frame_status()
            diff = self.frame - oldest_frame
            count += 1
        # Re-anchor the frame clock after stalling.
        self.time = time.time() - 0.02
    def __check_frame_status(self):
        # Returns (oldest acked frame across clients, list of all acks).
        oldest_frame = self.frame
        frames = []
        with self.lock:
            for client in self.clients:
                af = client.frame
                if af < oldest_frame:
                    oldest_frame = af
                frames.append(af)
        return oldest_frame, frames
    def __print_status(self):
        # Periodic console report: frame, avg ping (ms), lag (ms).
        for i, client in enumerate(self.clients):
            print("{0} f {1:6d} p avg: {2:3d} {3:3d}".format(i,
                    client.frame, int(client.pings_avg * 1000),
                    int(client.lag * 1000)))
    def __send_status(self):
        # Broadcast each player's lag and average ping to everyone.
        for i, client in enumerate(self.clients):
            v = int(client.lag * 1000) & 0x0000ffff
            message = 0x80000000 | MESSAGE_PLAYER_LAG << 24 | i << 16 | v
            self.__send_to_clients(message)
            v = int(client.pings_avg * 1000) & 0x0000ffff
            message = 0x80000000 | MESSAGE_PLAYER_PING << 24 | i << 16 | v
            self.__send_to_clients(message)
    def add_input_event(self, client, input_event):
        """Broadcast an input event from *client* to all clients."""
        if not self.started:
            # game was not started - ignoring input event
            print("game not started, ignoring input event {0:08x}".format(
                    input_event))
            return
        with self.lock:
            if not client.playing:
                print("client", client, "is no longer valid, dropping msg")
                return
            # for now, just broadcast out again to all clients
            message = (1 << 29) | input_event
            self.__send_to_clients(message)
    def add_text_message(self, from_client, text):
        """Broadcast a chat/text payload (including back to the sender)."""
        print("add text message")
        with self.lock:
            for client in self.clients:
                #if from_client == client:
                #    continue
                message = 0x80000000 | MESSAGE_TEXT << 24 \
                        | from_client.player << 16 | len(text)
                message = int_to_bytes(message) + text
                print("send", repr(message), "to", client)
                client.queue_bytes(message)
    def send_to_clients(self, message, force_send=False):
        """Thread-safe broadcast of one message word to every client."""
        # using a lock here to ensure that send_to_clients can
        # be called from multiple threads, but all clients will
        # still get messages in expected order
        with self.lock:
            self.__send_to_clients(message, force_send)
    def __send_to_clients(self, message, force_send=False):
        # Caller must hold self.lock.
        for client in self.clients:
            #print("send", message, "to", client)
            client.queue_message(message)
            if force_send:
                client.send_queued_messages()
    def check_synchronization(self, check_frame):
        """Cross-check all clients' memory and random checksums for
        *check_frame*; on mismatch, notify everyone and raise.
        """
        # FIXME: ONLY RUN ONCE PER FRAME, not once per frame * clients
        with self.lock:
            if check_frame < 0:
                return
            index = check_frame % 100
            mem_check_data = []
            rnd_check_data = []
            for client in self.clients:
                if client.frame <= check_frame:
                    # cannot check yet
                    return
                mem_check_data.append((client, client.memcheck[index]))
                rnd_check_data.append((client, client.rndcheck[index]))
            if not mem_check_data:
                # Robustness fix: with no clients there is nothing to
                # compare (indexing [0] below would raise IndexError).
                return
            check = mem_check_data[0][1]
            for client, data in mem_check_data:
                if check != data:
                    print("memcheck failed for frame", check_frame)
                    for c, d in mem_check_data:
                        print("* {0:08x} f {1:05d} {2}".format(
                                d[0], d[1], c))
                    for c in self.clients:
                        c.send_error_message(ERROR_MEMORY_DESYNC)
                        c.send_queued_messages()
                    raise Exception("mem check failed")
            check = rnd_check_data[0][1]
            for client, data in rnd_check_data:
                if check != data:
                    print("rndcheck failed for frame", check_frame)
                    for c, d in rnd_check_data:
                        print("* {0:08x} f {1:05d} {2}".format(
                                d[0], d[1], c))
                    for c in self.clients:
                        c.send_error_message(ERROR_RANDOM_DESYNC)
                        c.send_queued_messages()
                    raise Exception("rnd check failed")
# Placeholder for per-address client state; currently unused.
address_map = {
}
def accept_client(server_socket):
    """Accept one TCP connection and wrap it in a Client (which starts
    its own reader thread); also refreshes the launch-timeout clock.
    """
    global last_keepalive_time
    # Short accept timeout so accept_thread can notice game.stop.
    server_socket.settimeout(1)
    client_socket, client_address = server_socket.accept()
    #client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDLOWAT, 4)
    # Disable Nagle: the protocol sends many tiny 4-byte messages.
    client_socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
    client_socket.settimeout(None)
    client = Client(client_socket, client_address)
    #client.player = len(game.clients) + 1
    print("Client connected", client)
    last_keepalive_time = time.time()
    #game.add_client(client)
    #client.start()
    #if game.can_start():
    #    game.start()
#stop_accepting = False
def accept_thread(server_socket):
    """Accept-loop thread: keep accepting clients until the game stops.

    accept() timeouts are expected (1s poll); other errors are logged
    and briefly backed off.
    """
    while not game.stop:
        try:
            accept_client(server_socket)
        except socket.timeout:
            pass
        except Exception:
            traceback.print_exc()
            time.sleep(1.0)
def _run_server():
    """Create the Game, bind/listen, spawn the accept thread, then block
    until the game starts (honoring --launch-timeout) and finally until
    it stops.
    """
    global game
    global last_keepalive_time
    game = Game(num_clients)
    address = (host, port)
    server_socket = socket.socket()
    server_socket.bind(address)
    server_socket.listen(10)
    print("listening")
    sys.stdout.flush()
    if address[0] != "0.0.0.0":
        print("server listening on", address[0], "port", address[1])
    else:
        print("server listening on port", address[1])
    print("want", num_clients, "client(s)")
    if num_clients > MAX_PLAYERS:
        print("ERROR: max clients are", MAX_PLAYERS)
    threading.Thread(target=accept_thread, args=(server_socket,)).start()
    last_keepalive_time = time.time()
    # Wait for all players; abort if nobody connects within the timeout.
    while not game.started:
        time.sleep(0.1)
        if launch_timeout:
            t2 = time.time()
            if t2 - last_keepalive_time > launch_timeout:
                print("game not started yet, aborting (timeout)")
                game.stop = True
                return
                #sys.exit()
    print("game started")
    # Block until the game loop (or an error) sets game.stop.
    while not game.stop:
        time.sleep(0.1)
def run_server():
    """Run the server and guarantee the game loop is told to stop.

    Previously ``game.stop = True`` ran as a plain trailing statement, so
    it was skipped for BaseExceptions other than KeyboardInterrupt and
    crashed with AttributeError if ``game`` was never created; a
    ``finally`` with a None-guard fixes both.
    """
    try:
        _run_server()
    except (Exception, KeyboardInterrupt):
        traceback.print_exc()
    finally:
        if game is not None:
            game.stop = True
# Script entry point: start the server when run directly.
if __name__ == "__main__":
    run_server()
| gpl-2.0 |
djaodjin/djaodjin-pages | testsite/templatetags/testsite_tags.py | 1 | 1914 | # Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from django import template
from django.contrib.messages.api import get_messages
from django.forms import BaseForm
from django.utils.safestring import mark_safe
import six
register = template.Library()
@register.filter()
def messages(obj):
    """
    Messages to be displayed to the current session.

    When *obj* is a form, return its non-field errors; otherwise *obj* is
    expected to be a request (or anything ``get_messages`` accepts) and
    its messages storage is returned.
    """
    if isinstance(obj, BaseForm):
        return obj.non_field_errors()
    return get_messages(obj)
@register.filter
def to_json(value):
    """Serialize *value* to a JSON string marked safe for template output.

    Strings are returned unchanged (assumed to already be serialized);
    everything else is dumped with ``json.dumps`` and marked safe, so it
    must not contain untrusted markup.
    """
    if isinstance(value, six.string_types):
        return value
    return mark_safe(json.dumps(value))
| bsd-2-clause |
subramani95/neutron | neutron/debug/debug_agent.py | 3 | 7740 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe'
class NeutronDebugAgent():
    """Agent that creates, inspects and deletes network probe ports so an
    operator can test connectivity on Neutron networks from this host.
    """
    OPTS = [
        # Needed for drivers
        cfg.StrOpt('external_network_bridge', default='br-ex',
                   help=_("Name of bridge used for external network "
                          "traffic.")),
    ]

    def __init__(self, conf, client, driver):
        # conf: agent configuration; client: Neutron API client;
        # driver: interface driver used to plug/unplug probe devices.
        self.conf = conf
        self.root_helper = config.get_root_helper(conf)
        self.client = client
        self.driver = driver

    def _get_namespace(self, port):
        # Probe ports each get their own "qprobe-<port id>" namespace.
        return "qprobe-%s" % port.id

    def create_probe(self, network_id, device_owner='network'):
        """Create a probe port on *network_id*, plug and address it.

        Reuses an existing device if one is already plugged; returns the
        created port model.
        """
        network = self._get_network(network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge

        port = self._create_port(network, device_owner)
        interface_name = self.driver.get_device_name(port)
        namespace = None
        if self.conf.use_namespaces:
            namespace = self._get_namespace(port)

        if ip_lib.device_exists(interface_name, self.root_helper, namespace):
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             bridge=bridge,
                             namespace=namespace)
        # Assign every fixed IP (with its subnet prefix) to the device.
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
        return port

    def _get_subnet(self, subnet_id):
        # Fetch a subnet from the API and wrap it in an attr-access model.
        subnet_dict = self.client.show_subnet(subnet_id)['subnet']
        return dhcp.DictModel(subnet_dict)

    def _get_network(self, network_id):
        # Fetch a network, resolving its subnets into full models and
        # exposing the router:external flag as ``external``.
        network_dict = self.client.show_network(network_id)['network']
        network = dhcp.DictModel(network_dict)
        network.external = network_dict.get('router:external')
        obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
        network.subnets = obj_subnet
        return network

    def clear_probe(self):
        """Delete every probe port owned by this host."""
        ports = self.client.list_ports(
            device_id=socket.gethostname(),
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            self.delete_probe(port['id'])

    def delete_probe(self, port_id):
        """Unplug the probe device, remove its namespace and the port."""
        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
        network = self._get_network(port.network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge
        ip = ip_lib.IPWrapper(self.root_helper)
        namespace = self._get_namespace(port)
        if self.conf.use_namespaces and ip.netns.exists(namespace):
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge,
                               namespace=namespace)
            try:
                ip.netns.delete(namespace)
            except Exception:
                # Best-effort cleanup; the port is deleted regardless.
                LOG.warn(_('Failed to delete namespace %s'), namespace)
        else:
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge)
        self.client.delete_port(port.id)

    def list_probes(self):
        """Return all probe ports, annotated with their device names."""
        ports = self.client.list_ports(
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            port['device_name'] = self.driver.get_device_name(
                dhcp.DictModel(port))
        return info

    def exec_command(self, port_id, command=None):
        """Run *command* inside the probe's namespace.

        With no command and namespaces enabled, returns the shell prefix
        an operator can use to enter the namespace manually.
        """
        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
        ip = ip_lib.IPWrapper(self.root_helper)
        namespace = self._get_namespace(port)
        if self.conf.use_namespaces:
            if not command:
                return "sudo ip netns exec %s" % self._get_namespace(port)
            namespace = ip.ensure_namespace(namespace)
            return namespace.netns.execute(shlex.split(command))
        else:
            return utils.execute(shlex.split(command))

    def ensure_probe(self, network_id):
        """Return this host's probe port on *network_id*, creating one
        if none exists yet."""
        ports = self.client.list_ports(network_id=network_id,
                                       device_id=socket.gethostname(),
                                       device_owner=DEVICE_OWNER_NETWORK_PROBE)
        info = ports.get('ports', [])
        if info:
            return dhcp.DictModel(info[0])
        else:
            return self.create_probe(network_id)

    def ping_all(self, network_id=None, timeout=1):
        """Ping every fixed IP of every (non-probe) port, from a probe on
        each port's network; returns the concatenated ping output."""
        if network_id:
            ports = self.client.list_ports(network_id=network_id)['ports']
        else:
            ports = self.client.list_ports()['ports']
        result = ""
        for port in ports:
            probe = self.ensure_probe(port['network_id'])
            if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
                continue
            for fixed_ip in port['fixed_ips']:
                address = fixed_ip['ip_address']
                subnet = self._get_subnet(fixed_ip['subnet_id'])
                # IPv6 requires ping6.
                if subnet.ip_version == 4:
                    ping_command = 'ping'
                else:
                    ping_command = 'ping6'
                result += self.exec_command(probe.id,
                                            '%s -c 1 -w %s %s' % (ping_command,
                                                                  timeout,
                                                                  address))
        return result

    def _create_port(self, network, device_owner):
        # Create the probe port via the API, with one fixed IP per subnet,
        # bound to this host; returns it as a model with subnets resolved.
        host = self.conf.host
        body = {'port': {'admin_state_up': True,
                         'network_id': network.id,
                         'device_id': '%s' % socket.gethostname(),
                         'device_owner': '%s:probe' % device_owner,
                         'tenant_id': network.tenant_id,
                         'binding:host_id': host,
                         'fixed_ips': [dict(subnet_id=s.id)
                                       for s in network.subnets]}}
        port_dict = self.client.create_port(body)['port']
        port = dhcp.DictModel(port_dict)
        port.network = network
        for fixed_ip in port.fixed_ips:
            fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
        return port
joshuajan/odoo | addons/account_check_writing/__init__.py | 446 | 1111 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import account_voucher
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
popazerty/enigma2-4.3 | lib/python/Screens/PowerTimerEntry.py | 8 | 15641 | from Screens.Screen import Screen
from Components.config import ConfigSelection, ConfigSelectionNumber, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, ConfigInteger, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Button import Button
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Pixmap import Pixmap
from Components.SystemInfo import SystemInfo
from PowerTimer import AFTEREVENT, TIMERTYPE
from time import localtime, mktime, time, strftime
from datetime import datetime
class TimerEntry(Screen, ConfigListScreen):
    """Setup screen for creating or editing a single PowerTimer entry.

    The config list is rebuilt dynamically (createSetup) whenever the
    timer type, repeat type or "set end time" option changes; keyGo
    copies the edited values back into self.timer and saves them.
    """
    def __init__(self, session, timer):
        Screen.__init__(self, session)
        self.timer = timer
        self.entryDate = None
        self.entryService = None
        self["HelpWindow"] = Pixmap()
        self["HelpWindow"].hide()
        self["oktext"] = Label(_("OK"))
        self["canceltext"] = Label(_("Cancel"))
        self["ok"] = Pixmap()
        self["cancel"] = Pixmap()
        self["summary_description"] = StaticText("")
        self.createConfig()
        self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions"],
        {
            "ok": self.keySelect,
            "save": self.keyGo,
            "cancel": self.keyCancel,
            "volumeUp": self.incrementStart,
            "volumeDown": self.decrementStart,
            "size+": self.incrementEnd,
            "size-": self.decrementEnd,
            "up": self.keyUp,
            "down": self.keyDown
        }, -2)
        self.list = []
        ConfigListScreen.__init__(self, self.list, session = session)
        self.setTitle(_("PowerManager entry"))
        self.createSetup("config")

    def createConfig(self):
        """Build all Config* objects from the current state of self.timer."""
        afterevent = {
            AFTEREVENT.NONE: "nothing",
            AFTEREVENT.WAKEUPTOSTANDBY: "wakeuptostandby",
            AFTEREVENT.STANDBY: "standby",
            AFTEREVENT.DEEPSTANDBY: "deepstandby"
        }[self.timer.afterEvent]
        timertype = {
            TIMERTYPE.WAKEUP: "wakeup",
            TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
            TIMERTYPE.AUTOSTANDBY: "autostandby",
            TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
            TIMERTYPE.STANDBY: "standby",
            TIMERTYPE.DEEPSTANDBY: "deepstandby",
            TIMERTYPE.REBOOT: "reboot",
            TIMERTYPE.RESTART: "restart"
        }[self.timer.timerType]
        weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")

        # calculate default values from the timer's repeat bitmask
        # (bit 0 = Monday ... bit 6 = Sunday)
        day = []
        weekday = 0
        for x in (0, 1, 2, 3, 4, 5, 6):
            day.append(0)
        if self.timer.repeated: # repeated
            type = "repeated"
            if self.timer.repeated == 31: # Mon-Fri
                repeated = "weekdays"
            elif self.timer.repeated == 127: # daily
                repeated = "daily"
            else:
                flags = self.timer.repeated
                repeated = "user"
                count = 0
                for x in (0, 1, 2, 3, 4, 5, 6):
                    if flags == 1: # only one (the last) bit remains -> weekly
                        print("Set to weekday " + str(x))
                        weekday = x
                    if flags & 1 == 1: # set user defined flags
                        day[x] = 1
                        count += 1
                    else:
                        day[x] = 0
                    flags >>= 1
                if count == 1:
                    repeated = "weekly"
        else: # once
            type = "once"
            repeated = None
            weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
            day[weekday] = 1

        autosleepinstandbyonly = self.timer.autosleepinstandbyonly
        autosleepdelay = self.timer.autosleepdelay
        autosleeprepeat = self.timer.autosleeprepeat

        if SystemInfo["DeepstandbySupport"]:
            shutdownString = _("go to deep standby")
        else:
            shutdownString = _("shut down")
        self.timerentry_timertype = ConfigSelection(choices = [("wakeup", _("wakeup")),("wakeuptostandby", _("wakeup to standby")), ("autostandby", _("auto standby")), ("autodeepstandby", _("auto deepstandby")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("reboot", _("reboot system")), ("restart", _("restart GUI"))], default = timertype)
        # FIX: removed the duplicated trailing ("nothing", _("do nothing")) choice
        self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("wakeuptostandby", _("wakeup to standby")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = afterevent)
        self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
        self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("daily", _("daily")), ("weekly", _("weekly")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
        self.timerrntry_autosleepdelay = ConfigSelectionNumber(default = autosleepdelay, stepwidth = 15, min = 15, max = 300, wraparound = True)
        self.timerentry_autosleeprepeat = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = autosleeprepeat)
        self.timerrntry_autosleepinstandbyonly = ConfigSelection(choices = [("yes",_("Yes")), ("yesNWno",_("Yes, and no network traffic")), ("no", _("No"))],default=autosleepinstandbyonly)

        self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d.%B %Y"), increment = 86400)
        self.timerentry_starttime = ConfigClock(default = self.timer.begin)
        self.timerentry_endtime = ConfigClock(default = self.timer.end)
        # show the end time entry only when the timer spans more than a minute
        self.timerentry_showendtime = ConfigSelection(default = (((self.timer.end - self.timer.begin) /60 ) > 1), choices = [(True, _("yes")), (False, _("no"))])

        self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
        self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])

        self.timerentry_day = ConfigSubList()
        for x in (0, 1, 2, 3, 4, 5, 6):
            self.timerentry_day.append(ConfigYesNo(default = day[x]))

    def createSetup(self, widget):
        """(Re)build the visible config list for the current selections."""
        self.list = []
        self.timerType = getConfigListEntry(_("Timer type"), self.timerentry_timertype)
        self.list.append(self.timerType)

        if self.timerentry_timertype.value == "autostandby" or self.timerentry_timertype.value == "autodeepstandby":
            if self.timerentry_timertype.value == "autodeepstandby":
                self.list.append(getConfigListEntry(_("Only active when in standby"), self.timerrntry_autosleepinstandbyonly))
            self.list.append(getConfigListEntry(_("Sleep delay"), self.timerrntry_autosleepdelay))
            self.list.append(getConfigListEntry(_("Repeat type"), self.timerentry_autosleeprepeat))
            # keep these attributes defined so newConfig() can compare
            # against them even though they are not shown for auto timers
            self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type)
            self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime)
            self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated)
        else:
            self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type)
            self.list.append(self.timerTypeEntry)

            if self.timerentry_type.value == "once":
                self.frequencyEntry = None
            else: # repeated
                self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated)
                self.list.append(self.frequencyEntry)
                self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate)
                self.list.append(self.repeatedbegindateEntry)
                if self.timerentry_repeated.value == "daily":
                    pass
                if self.timerentry_repeated.value == "weekdays":
                    pass
                if self.timerentry_repeated.value == "weekly":
                    self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
                if self.timerentry_repeated.value == "user":
                    self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
                    self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
                    self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
                    self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
                    self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
                    self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
                    self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
            self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date)
            if self.timerentry_type.value == "once":
                self.list.append(self.entryDate)

            self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime)
            self.list.append(self.entryStartTime)

            self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime)
            self.list.append(self.entryShowEndTime)

            self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime)
            if self.timerentry_showendtime.value:
                self.list.append(self.entryEndTime)

            self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))

        self[widget].list = self.list
        self[widget].l.setList(self.list)
        self.checkSummary()

    def createSummary(self):
        pass

    def checkSummary(self):
        # Mirror the currently selected entry's label onto the LCD summary.
        self["summary_description"].text = self["config"].getCurrent()[0]

    def newConfig(self):
        # Rebuild the list when an entry that controls its layout changed.
        if self["config"].getCurrent() in (self.timerType, self.timerTypeEntry, self.frequencyEntry, self.entryShowEndTime):
            self.createSetup("config")

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)
        self.newConfig()

    def keyRight(self):
        ConfigListScreen.keyRight(self)
        self.newConfig()

    def keySelect(self):
        # OK acts like the green "save" button on this screen.
        self.keyGo()

    def keyUp(self):
        self["config"].moveUp()
        self.checkSummary()

    def keyDown(self):
        self["config"].moveDown()
        self.checkSummary()

    def getTimestamp(self, date, mytime):
        """Combine a date (epoch seconds) with an (hour, minute) pair into
        a local-time epoch timestamp."""
        d = localtime(date)
        dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
        return int(mktime(dt.timetuple()))

    def getBeginEnd(self):
        """Return (begin, end) timestamps for a one-shot timer, rolling the
        end over to the next day when it lies before the start."""
        date = self.timerentry_date.value
        endtime = self.timerentry_endtime.value
        starttime = self.timerentry_starttime.value

        begin = self.getTimestamp(date, starttime)
        end = self.getTimestamp(date, endtime)

        # if the endtime is less than the starttime, add 1 day.
        if end < begin:
            end += 86400
        return begin, end

    def keyGo(self, result = None):
        """Write the edited values back into self.timer, save and close."""
        if not self.timerentry_showendtime.value:
            self.timerentry_endtime.value = self.timerentry_starttime.value

        self.timer.resetRepeated()
        self.timer.timerType = {
            "wakeup": TIMERTYPE.WAKEUP,
            "wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
            "autostandby": TIMERTYPE.AUTOSTANDBY,
            "autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
            "standby": TIMERTYPE.STANDBY,
            "deepstandby": TIMERTYPE.DEEPSTANDBY,
            "reboot": TIMERTYPE.REBOOT,
            "restart": TIMERTYPE.RESTART
        }[self.timerentry_timertype.value]
        self.timer.afterEvent = {
            "nothing": AFTEREVENT.NONE,
            "wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
            "standby": AFTEREVENT.STANDBY,
            "deepstandby": AFTEREVENT.DEEPSTANDBY
        }[self.timerentry_afterevent.value]

        if self.timerentry_type.value == "once":
            self.timer.begin, self.timer.end = self.getBeginEnd()
        if self.timerentry_timertype.value == "autostandby" or self.timerentry_timertype.value == "autodeepstandby":
            self.timer.begin = int(time()) + 10
            self.timer.end = self.timer.begin
            self.timer.autosleepinstandbyonly = self.timerrntry_autosleepinstandbyonly.value
            self.timer.autosleepdelay = self.timerrntry_autosleepdelay.value
            self.timer.autosleeprepeat = self.timerentry_autosleeprepeat.value
        if self.timerentry_type.value == "repeated":
            if self.timerentry_repeated.value == "daily":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "weekly":
                self.timer.setRepeated(self.timerentry_weekday.index)

            if self.timerentry_repeated.value == "weekdays":
                for x in (0, 1, 2, 3, 4):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "user":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    if self.timerentry_day[x].value:
                        self.timer.setRepeated(x)

            self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
            if self.timer.repeated:
                self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_endtime.value)
            else:
                # FIX: 'time' is the function imported from the time module
                # (see the 'from time import ...' at the top of this file),
                # so call it directly; time.time() raised AttributeError here.
                self.timer.begin = self.getTimestamp(time(), self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(time(), self.timerentry_endtime.value)

            # when a timer end is set before the start, add 1 day
            if self.timer.end < self.timer.begin:
                self.timer.end += 86400

        self.saveTimer()
        self.close((True, self.timer))

    def incrementStart(self):
        self.timerentry_starttime.increment()
        self["config"].invalidate(self.entryStartTime)
        # rolling past midnight moves a one-shot timer to the next day
        if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [0, 0]:
            self.timerentry_date.value += 86400
            self["config"].invalidate(self.entryDate)

    def decrementStart(self):
        self.timerentry_starttime.decrement()
        self["config"].invalidate(self.entryStartTime)
        # rolling back past midnight moves a one-shot timer to the previous day
        if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [23, 59]:
            self.timerentry_date.value -= 86400
            self["config"].invalidate(self.entryDate)

    def incrementEnd(self):
        if self.entryEndTime is not None:
            self.timerentry_endtime.increment()
            self["config"].invalidate(self.entryEndTime)

    def decrementEnd(self):
        if self.entryEndTime is not None:
            self.timerentry_endtime.decrement()
            self["config"].invalidate(self.entryEndTime)

    def saveTimer(self):
        self.session.nav.PowerTimer.saveTimer()

    def keyCancel(self):
        self.close((False,))
class TimerLog(Screen):
    """Read-only viewer for a PowerTimer's log, with per-entry delete and
    clear-all; edits are committed back to the timer only on close."""
    def __init__(self, session, timer):
        Screen.__init__(self, session)
        self.skinName = "TimerLog"
        self.timer = timer
        # Work on a copy so changes are only committed in keyClose().
        self.log_entries = self.timer.log_entries[:]
        self.fillLogList()
        self["loglist"] = MenuList(self.list)
        self["logentry"] = Label()
        self["summary_description"] = StaticText("")
        self["key_red"] = Button(_("Delete entry"))
        self["key_green"] = Button()
        self["key_yellow"] = Button("")
        self["key_blue"] = Button(_("Clear log"))
        self.onShown.append(self.updateText)
        self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
        {
            "ok": self.keyClose,
            "cancel": self.keyClose,
            "up": self.up,
            "down": self.down,
            "left": self.left,
            "right": self.right,
            "red": self.deleteEntry,
            "blue": self.clearLog
        }, -1)
        self.setTitle(_("PowerManager log"))

    def deleteEntry(self):
        # Remove the highlighted log entry; no-op when the list is empty.
        cur = self["loglist"].getCurrent()
        if cur is None:
            return
        self.log_entries.remove(cur[1])
        self.fillLogList()
        self["loglist"].l.setList(self.list)
        self.updateText()

    def fillLogList(self):
        # Each row is ("YYYY-MM-DD HH-MM - message", raw_log_entry); the raw
        # entry is a (timestamp, code, message) tuple kept for later access.
        self.list = [(str(strftime("%Y-%m-%d %H-%M", localtime(x[0])) + " - " + x[2]), x) for x in self.log_entries]

    def clearLog(self):
        self.log_entries = []
        self.fillLogList()
        self["loglist"].l.setList(self.list)
        self.updateText()

    def keyClose(self):
        # Commit edits back to the timer only when something actually changed.
        if self.timer.log_entries != self.log_entries:
            self.timer.log_entries = self.log_entries
            self.close((True, self.timer))
        else:
            self.close((False,))

    def up(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.moveUp)
        self.updateText()

    def down(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.moveDown)
        self.updateText()

    def left(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.pageUp)
        self.updateText()

    def right(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.pageDown)
        self.updateText()

    def updateText(self):
        # Mirror the selected entry's message into the detail label and the
        # LCD summary; blank the label when the log is empty.
        if self.list:
            self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
            self["summary_description"].setText(str(self["loglist"].getCurrent()[1][2]))
        else:
            self["logentry"].setText("")
| gpl-2.0 |
kogotko/carburetor | openstack_dashboard/dashboards/project/cgroups/workflows.py | 4 | 16683 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
INDEX_URL = "horizon:project:cgroups:index"
CGROUP_VOLUME_MEMBER_SLUG = "update_members"
def cinder_az_supported(request):
    """Return True when Cinder's availability-zones extension is enabled,
    False otherwise (including when the check itself fails)."""
    try:
        return cinder.extension_supported(request, 'AvailabilityZones')
    except Exception:
        msg = _('Unable to determine if availability '
                'zones extension is supported.')
        exceptions.handle(request, msg)
    return False
def availability_zones(request):
    """Build the availability-zone choice list for the create form.

    Returns sorted (value, label) tuples for every available zone,
    prefixed with a placeholder entry when the list is empty or when
    more than one zone exists.
    """
    choices = []
    if cinder_az_supported(request):
        try:
            zones = api.cinder.availability_zone_list(request)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve availability '
                                         'zones.'))
        else:
            choices = sorted((zone.zoneName, zone.zoneName)
                             for zone in zones
                             if zone.zoneState['available'])
    if not choices:
        choices.insert(0, ("", _("No availability zones found")))
    elif len(choices) > 1:
        choices.insert(0, ("", _("Any Availability Zone")))
    return choices
class AddCGroupInfoAction(workflows.Action):
    """Workflow action collecting name, description and availability zone
    for a new volume consistency group."""
    name = forms.CharField(label=_("Name"),
                           max_length=255)
    description = forms.CharField(widget=forms.widgets.Textarea(
        attrs={'rows': 4}),
        label=_("Description"),
        required=False)
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Availability Zone'),
                   'data-source-image_source': _('Availability Zone')}))

    def __init__(self, request, *args, **kwargs):
        super(AddCGroupInfoAction, self).__init__(request,
                                                  *args,
                                                  **kwargs)
        # Zone choices are dynamic, so populate them per request.
        self.fields['availability_zone'].choices = \
            availability_zones(request)

    class Meta(object):
        name = _("Consistency Group Information")
        help_text = _("Volume consistency groups provide a mechanism for "
                      "creating snapshots of multiple volumes at the same "
                      "point-in-time to ensure data consistency\n\n"
                      "A consistency group can support more than one volume "
                      "type, but it can only contain volumes hosted by the "
                      "same back end.")
        slug = "set_cgroup_info"

    def clean(self):
        """Reject names already used by another consistency group
        (case-insensitive comparison)."""
        cleaned_data = super(AddCGroupInfoAction, self).clean()
        name = cleaned_data.get('name')

        try:
            cgroups = cinder.volume_cgroup_list(self.request)
        except Exception:
            msg = _('Unable to get consistency group list')
            exceptions.check_message(["Connection", "refused"], msg)
            raise

        if cgroups is not None and name is not None:
            for cgroup in cgroups:
                if cgroup.name.lower() == name.lower():
                    # ensure new name has reasonable length
                    formatted_name = name
                    if len(name) > 20:
                        formatted_name = name[:14] + "..." + name[-3:]
                    raise forms.ValidationError(
                        _('The name "%s" is already used by '
                          'another consistency group.')
                        % formatted_name
                    )
        return cleaned_data
class AddCGroupInfoStep(workflows.Step):
    """Workflow step wrapping AddCGroupInfoAction."""
    action_class = AddCGroupInfoAction
    # Keys this step contributes to the workflow context.
    contributes = ("availability_zone",
                   "description",
                   "name")
class AddVolumeTypesToCGroupAction(workflows.MembershipAction):
    """Membership action for selecting the volume types of a new group."""
    def __init__(self, request, *args, **kwargs):
        super(AddVolumeTypesToCGroupAction, self).__init__(request,
                                                           *args,
                                                           **kwargs)
        err_msg = _('Unable to get the available volume types')

        # MembershipAction requires a default-role field plus a member
        # multi-choice field named after that role.
        default_role_field_name = self.get_default_role_field_name()
        self.fields[default_role_field_name] = forms.CharField(required=False)
        self.fields[default_role_field_name].initial = 'member'

        field_name = self.get_member_field_name('member')
        self.fields[field_name] = forms.MultipleChoiceField(required=False)

        vtypes = []
        try:
            vtypes = cinder.volume_type_list(request)
        except Exception:
            exceptions.handle(request, err_msg)

        vtype_list = [(vtype.id, vtype.name)
                      for vtype in vtypes]
        self.fields[field_name].choices = vtype_list

    class Meta(object):
        name = _("Manage Volume Types")
        slug = "add_vtypes_to_cgroup"

    def clean(self):
        # A consistency group is meaningless without at least one type.
        cleaned_data = super(AddVolumeTypesToCGroupAction, self).clean()
        volume_types = cleaned_data.get('add_vtypes_to_cgroup_role_member')
        if not volume_types:
            raise forms.ValidationError(
                _('At least one volume type must be assigned '
                  'to a consistency group.')
            )
        return cleaned_data
class AddVolTypesToCGroupStep(workflows.UpdateMembersStep):
    """Step presenting the dual-list volume-type picker."""
    action_class = AddVolumeTypesToCGroupAction
    help_text = _("Add volume types to this consistency group. "
                  "Multiple volume types can be added to the same "
                  "consistency group only if they are associated with "
                  "same back end.")
    available_list_title = _("All available volume types")
    members_list_title = _("Selected volume types")
    no_available_text = _("No volume types found.")
    no_members_text = _("No volume types selected.")
    show_roles = False
    contributes = ("volume_types",)

    def contribute(self, data, context):
        # Copy the selected member ids into the workflow context.
        if data:
            member_field_name = self.get_member_field_name('member')
            context['volume_types'] = data.get(member_field_name, [])
        return context
class AddVolumesToCGroupAction(workflows.MembershipAction):
    """Membership action listing volumes eligible for this group.

    A volume is eligible when its volume type is one of the group's
    assigned types and it either belongs to no consistency group or
    already belongs to this one.
    """
    def __init__(self, request, *args, **kwargs):
        super(AddVolumesToCGroupAction, self).__init__(request,
                                                       *args,
                                                       **kwargs)
        err_msg = _('Unable to get the available volumes')

        default_role_field_name = self.get_default_role_field_name()
        self.fields[default_role_field_name] = forms.CharField(required=False)
        self.fields[default_role_field_name].initial = 'member'

        field_name = self.get_member_field_name('member')
        self.fields[field_name] = forms.MultipleChoiceField(required=False)

        # Volume-type ids assigned to this group, supplied via step initial.
        vtypes = self.initial['vtypes']
        try:
            # get names of volume types associated with CG
            vtype_names = []
            volume_types = cinder.volume_type_list(request)
            for volume_type in volume_types:
                if volume_type.id in vtypes:
                    vtype_names.append(volume_type.name)

            # collect volumes that are associated with volume types
            vol_list = []
            volumes = cinder.volume_list(request)
            for volume in volumes:
                if volume.volume_type in vtype_names:
                    cgroup_id = None
                    vol_is_available = False
                    in_this_cgroup = False
                    if hasattr(volume, 'consistencygroup_id'):
                        # this vol already belongs to a CG.
                        # only include it here if it belongs to this CG
                        cgroup_id = volume.consistencygroup_id
                    if not cgroup_id:
                        # put this vol in the available list
                        vol_is_available = True
                    elif cgroup_id == self.initial['cgroup_id']:
                        # put this vol in the assigned to CG list
                        vol_is_available = True
                        in_this_cgroup = True
                    if vol_is_available:
                        vol_list.append({'volume_name': volume.name,
                                         'volume_id': volume.id,
                                         'in_cgroup': in_this_cgroup,
                                         'is_duplicate': False})

            sorted_vol_list = sorted(vol_list, key=lambda k: k['volume_name'])

            # mark any duplicate volume names (duplicates are adjacent
            # after sorting, so comparing each entry to its neighbor works)
            for index, volume in enumerate(sorted_vol_list):
                if index < len(sorted_vol_list) - 1:
                    if volume['volume_name'] == \
                            sorted_vol_list[index + 1]['volume_name']:
                        volume['is_duplicate'] = True
                        sorted_vol_list[index + 1]['is_duplicate'] = True

            # update display with all available vols and those already
            # assigned to consistency group
            available_vols = []
            assigned_vols = []
            for volume in sorted_vol_list:
                if volume['is_duplicate']:
                    # add id to differentiate volumes to user
                    entry = volume['volume_name'] + \
                        " [" + volume['volume_id'] + "]"
                else:
                    entry = volume['volume_name']
                available_vols.append((volume['volume_id'], entry))
                if volume['in_cgroup']:
                    assigned_vols.append(volume['volume_id'])

        except Exception:
            exceptions.handle(request, err_msg)

        # NOTE(review): if the lookups above raised before available_vols was
        # bound and exceptions.handle() returned, the line below would raise
        # NameError — verify exceptions.handle always redirects/re-raises here.
        self.fields[field_name].choices = \
            available_vols
        self.fields[field_name].initial = assigned_vols

    class Meta(object):
        name = _("Manage Volumes")
        slug = "add_volumes_to_cgroup"
class AddVolumesToCGroupStep(workflows.UpdateMembersStep):
    """Step presenting the dual-list volume picker for an existing group."""
    action_class = AddVolumesToCGroupAction
    help_text = _("Add/remove volumes to/from this consistency group. "
                  "Only volumes associated with the volume type(s) assigned "
                  "to this consistency group will be available for selection.")
    available_list_title = _("All available volumes")
    members_list_title = _("Selected volumes")
    no_available_text = _("No volumes found.")
    no_members_text = _("No volumes selected.")
    show_roles = False
    # Context keys this step needs from the caller / contributes onward.
    depends_on = ("cgroup_id", "name", "vtypes")
    contributes = ("volumes",)

    def contribute(self, data, context):
        # Copy the selected member ids into the workflow context.
        if data:
            member_field_name = self.get_member_field_name('member')
            context['volumes'] = data.get(member_field_name, [])
        return context
class CreateCGroupWorkflow(workflows.Workflow):
    """Two-step workflow that creates a volume consistency group."""
    slug = "create_cgroup"
    name = _("Create Consistency Group")
    finalize_button_name = _("Create Consistency Group")
    failure_message = _('Unable to create consistency group.')
    success_message = _('Created new volume consistency group')
    success_url = INDEX_URL
    default_steps = (AddCGroupInfoStep,
                     AddVolTypesToCGroupStep)

    def handle(self, request, context):
        """Validate that all selected volume types share one backend name,
        then create the consistency group. Returns True on success."""
        selected_vol_types = context['volume_types']

        try:
            vol_types = cinder.volume_type_list_with_qos_associations(
                request)
        except Exception:
            msg = _('Unable to get volume type list')
            exceptions.check_message(["Connection", "refused"], msg)
            return False

        # ensure that all selected volume types share same backend name
        backend_name = None
        invalid_backend = False
        for selected_vol_type in selected_vol_types:
            if not invalid_backend:
                for vol_type in vol_types:
                    if selected_vol_type == vol_type.id:
                        if (hasattr(vol_type, "extra_specs") and
                                'volume_backend_name' in vol_type.extra_specs):
                            vol_type_backend = \
                                vol_type.extra_specs['volume_backend_name']
                            if vol_type_backend is None:
                                invalid_backend = True
                                break
                            # first match fixes the required backend name
                            if backend_name is None:
                                backend_name = vol_type_backend
                            if vol_type_backend != backend_name:
                                invalid_backend = True
                                break
                        else:
                            # a type without an explicit backend name is
                            # treated as invalid for consistency groups
                            invalid_backend = True
                            break

        if invalid_backend:
            msg = _('All selected volume types must be associated '
                    'with the same volume backend name.')
            exceptions.handle(request, msg)
            return False

        try:
            # the API takes the type ids as one comma-separated string
            vtypes_str = ",".join(context['volume_types'])
            self.object = \
                cinder.volume_cgroup_create(
                    request,
                    vtypes_str,
                    context['name'],
                    description=context['description'],
                    availability_zone=context['availability_zone'])
        except Exception:
            exceptions.handle(request, _('Unable to create consistency '
                                         'group.'))
            return False
        return True
class UpdateCGroupWorkflow(workflows.Workflow):
    """Workflow that syncs a consistency group's volume membership."""
    slug = "update_cgroup"
    name = _("Add/Remove Consistency Group Volumes")
    finalize_button_name = _("Submit")
    success_message = _('Updated volumes for consistency group "%s".')
    failure_message = _('Unable to update volumes for consistency group')
    success_url = INDEX_URL
    default_steps = (AddVolumesToCGroupStep,)

    def handle(self, request, context):
        """Diff the user's selection against current membership and issue a
        single cinder update call. Returns True on success."""
        cgroup_id = context['cgroup_id']
        add_vols = []
        remove_vols = []
        try:
            selected_volumes = context['volumes']
            volumes = cinder.volume_list(request)

            # scan all volumes and make correct consistency group is set
            for volume in volumes:
                selected = False
                for selection in selected_volumes:
                    if selection == volume.id:
                        selected = True
                        break
                if selected:
                    # ensure this volume is in this consistency group
                    if hasattr(volume, 'consistencygroup_id'):
                        if volume.consistencygroup_id != cgroup_id:
                            add_vols.append(volume.id)
                    else:
                        add_vols.append(volume.id)
                else:
                    # ensure this volume is not in our consistency group
                    if hasattr(volume, 'consistencygroup_id'):
                        if volume.consistencygroup_id == cgroup_id:
                            # remove from this CG
                            remove_vols.append(volume.id)

            # the API takes comma-separated id strings
            add_vols_str = ",".join(add_vols)
            remove_vols_str = ",".join(remove_vols)

            if not add_vols_str and not remove_vols_str:
                # nothing to change
                return True

            cinder.volume_cgroup_update(request,
                                        cgroup_id,
                                        name=context['name'],
                                        add_vols=add_vols_str,
                                        remove_vols=remove_vols_str)
        except Exception:
            # error message supplied by form
            return False
        return True
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.resolveurl/lib/resolveurl/plugins/rapidvideocom.py | 2 | 2285 | # -*- coding: utf-8 -*-
"""
resolveurl Kodi Plugin
Copyright (C) 2018 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib2
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class RapidVideoResolver(ResolveUrl):
    """ResolveUrl plugin for rapidvideo.com / rapidvideo.is embed pages."""
    name = "rapidvideo.com"
    domains = ["rapidvideo.com", "rapidvideo.is"]
    pattern = '(?://|\.)(rapidvideo\.(?:com|is))/(?:[ev]/|embed/|\?v=|embed/\?v=)?([0-9A-Za-z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Resolve the embed page into a direct, playable stream URL.

        Follows per-quality sub-pages when the embed links to them,
        sorts all discovered HTML5 sources by quality and returns the
        picked one with the request headers appended. Raises
        ResolverError when no playable source is found.
        """
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.FF_USER_AGENT}
        try:
            html = self.net.http_GET(web_url, headers=headers).content
        except urllib2.HTTPError as e:
            if e.code == 404:
                raise ResolverError("Video not found")
        # NOTE(review): a non-404 HTTPError leaves `html` unbound below,
        # which would raise NameError — verify this is intended.
        # quality-specific sub-pages (...&q=...) linked from the embed page
        srcs = re.findall(r'href="(%s&q=[^"]+)' % web_url, html, re.I)
        if srcs:
            sources = []
            for src in srcs:
                shtml = self.net.http_GET(src, headers=headers).content
                strurl = helpers.parse_html5_source_list(shtml)
                if strurl:
                    sources.append(strurl[0])
        else:
            sources = helpers.parse_html5_source_list(html)
        if len(sources) > 0:
            sources = helpers.sort_sources_list(sources)
            return helpers.pick_source(sources) + helpers.append_headers(headers)
        else:
            raise ResolverError("Video not found")

    def get_url(self, host, media_id):
        return self._default_get_url(host, media_id, template='https://www.{host}/v/{media_id}')
| gpl-2.0 |
v-iam/azure-sdk-for-python | azure-mgmt-sql/azure/mgmt/sql/models/server_communication_link.py | 4 | 2123 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ServerCommunicationLink(ProxyResource):
    """Server communication link.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar state: The state.
    :vartype state: str
    :param partner_server: The name of the partner server.
    :type partner_server: str
    :ivar location: Communication link location.
    :vartype location: str
    :ivar kind: Communication link kind. This property is used for Azure
     Portal metadata.
    :vartype kind: str
    """

    # msrest validation metadata: which fields are server-populated
    # (read-only) and which are required on creation.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'state': {'readonly': True},
        'partner_server': {'required': True},
        'location': {'readonly': True},
        'kind': {'readonly': True},
    }

    # Maps Python attribute names to REST payload keys and wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'partner_server': {'key': 'properties.partnerServer', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
    }

    def __init__(self, partner_server):
        # Only partner_server is caller-supplied; the remaining fields are
        # read-only and filled in by the service on response deserialization.
        super(ServerCommunicationLink, self).__init__()
        self.state = None
        self.partner_server = partner_server
        self.location = None
        self.kind = None
| mit |
alexholcombe/twoWords | Charlie/stringResponse.py | 2 | 7552 | from psychopy import event, sound
import numpy as np
import string
from copy import deepcopy
import time
def drawResponses(responses, respStim, numCharsWanted, drawBlanks):
    """Render the characters typed so far into respStim and draw it.

    When drawBlanks is true, the string is padded with '_' up to
    numCharsWanted so the observer can see how many characters remain.
    """
    text = ''.join(responses)
    if drawBlanks:
        text += '_' * (numCharsWanted - len(text))
    respStim.setText(text, log=False)
    respStim.draw()
def collectStringResponse(numCharsWanted,x,respPromptStim,respStim,acceptTextStim,fixation,myWin,
                          clickSound,badKeySound,requireAcceptance,autopilot,responseDebug=False):
    '''Collect a string of numCharsWanted letters typed on the keyboard.

    respPromptStim should be a stimulus with a draw() method; it is shown
    while the observer types.  x is the horizontal offset of the echoed
    response text.  fixation may be None.  clickSound and badKeySound may be
    None (see setupSoundsForResponse), in which case that auditory feedback
    is skipped instead of crashing.  If requireAcceptance, the observer must
    press ENTER to finalise the response and may BACKSPACE to edit it first.

    Returns (expStop, passThisTrial, responses, responsesAutopilot) where
    responses is a numpy array of the letters typed.
    '''
    event.clearEvents() #clear the keyboard buffer
    respStim.setPos([x,0])
    drawBlanks = True #show remaining slots as underscores
    expStop = False
    passThisTrial = False
    responses = []
    numResponses = 0
    accepted = True
    if requireAcceptance: #require user to hit ENTER to finalize response
        accepted = False
    while not expStop and (numResponses < numCharsWanted or not accepted):
        noResponseYet = True
        thisResponse = ''
        click = False
        while noResponseYet: #loop until a valid key is hit
            if fixation is not None:
                fixation.draw()
            respPromptStim.draw()
            drawResponses(responses,respStim,numCharsWanted,drawBlanks)
            myWin.flip()
            click = False
            if autopilot: #need to wait, otherwise don't have a chance to press a key
                for f in range(20): time.sleep(.01)
            keysPressed = event.getKeys()
            keysPressed = [key.upper() for key in keysPressed] #transform to uppercase
            if autopilot:
                noResponseYet = False
                numResponses = numCharsWanted
                if 'ESCAPE' in keysPressed:
                    expStop = True
            elif len(keysPressed) > 0:
                # Process only the last key, it being the most recent.  In
                # theory a person could type more than one key between
                # window flips, but that would be hard to handle.
                key = keysPressed[-1]
                thisResponse = key
                if key in ['ESCAPE']:
                    expStop = True
                    noResponseYet = False
                elif key in string.ascii_letters:
                    noResponseYet = False
                    responses.append(thisResponse)
                    #not just using len(responses) because need this to work under autopilot, where thisResponse is null
                    numResponses += 1
                    click = True
                elif key in ['BACKSPACE','DELETE']:
                    if len(responses) > 0:
                        responses.pop()
                        numResponses -= 1
                else: #invalid key pressed
                    if badKeySound is not None: #sound creation can fail, leaving None
                        badKeySound.play()
        # BUGFIX: the condition used to be `click and (click is not None)`,
        # which re-tested `click` and then crashed when clickSound was None.
        if click and (clickSound is not None):
            clickSound.play()
        drawResponses(responses,respStim,numCharsWanted,drawBlanks)
        myWin.flip() #draw again, otherwise won't draw the last key
        if (numResponses == numCharsWanted) and requireAcceptance: #ask participant to HIT ENTER TO ACCEPT
            waitingForAccept = True
            while waitingForAccept and not expStop:
                if fixation is not None:
                    fixation.draw()
                acceptTextStim.draw()
                respStim.draw()
                for key in event.getKeys():
                    key = key.upper()
                    if key in ['ESCAPE']:
                        expStop = True
                    elif key in ['ENTER','RETURN']:
                        waitingForAccept = False
                        accepted = True
                    elif key in ['BACKSPACE','DELETE']: #re-open the response for editing
                        waitingForAccept = False
                        numResponses -= 1
                        responses.pop()
                        drawResponses(responses,respStim,numCharsWanted,drawBlanks)
                        myWin.flip() #draw again, otherwise won't draw the last key
                myWin.flip() #end of waitingForAccept loop
    #end of waiting until response is finished, all keys and acceptance if required
    responsesAutopilot = np.array( numCharsWanted*list([('A')]) )
    responses = np.array( responses )
    return expStop,passThisTrial,responses,responsesAutopilot
# #######End of function definition that collects responses!!!! #####################################
def setupSoundsForResponse():
    """Create the two feedback sounds used while typing a response.

    Returns (clickSound, badKeySound).  Either may be None if no sound
    could be created, so callers must check for None before calling .play().
    """
    fileName = '406__tictacshutup__click-1-d.wav'
    try:
        clickSound = sound.Sound(fileName)
    # `except Exception` (not a bare except) so Ctrl-C / SystemExit still
    # propagate; print() calls work on both Python 2 and 3.
    except Exception:
        print('Could not load the desired click sound file, instead using manually created inferior click')
        try:
            # Fall back to a short synthesised click.
            clickSound = sound.Sound('D', octave=3, sampleRate=22050, secs=0.015, bits=8)
        except Exception:
            clickSound = None
            print('Could not create a click sound for typing feedback')
    try:
        badKeySound = sound.Sound('A', octave=5, sampleRate=22050, secs=0.03, bits=8)
    except Exception:
        badKeySound = None
        print('Could not create an invalid key sound for typing feedback')
    return clickSound, badKeySound
if __name__=='__main__': #Running this file directly, must want to test functions in this file
    # Interactive smoke test: open a window and exercise
    # collectStringResponse() with a 5-character response.
    from psychopy import monitors, visual, event, data, logging, core, sound, gui
    window = visual.Window()
    msg = visual.TextStim(window, text='press a key\n<esc> to quit')
    msg.draw()
    window.flip()
    autoLogging=False
    autopilot = False
    #create click sound for keyboard
    clickSound, badKeySound = setupSoundsForResponse()
    # Prompt below centre, accept instructions lower still, response echoed
    # at screen centre (norm units).
    respPromptStim = visual.TextStim(window,pos=(0, -.7),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
    acceptTextStim = visual.TextStim(window,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
    acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
    respStim = visual.TextStim(window,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=.16,units='norm',autoLog=autoLogging)
    responseDebug=False; responses = list(); responsesAutopilot = list();
    numCharsWanted = 5
    respPromptStim.setText('Enter your ' + str(numCharsWanted) + '-character response')
    requireAcceptance = True
    x=-.2 #x offset relative to centre of screen
    expStop,passThisTrial,responses,responsesAutopilot = \
        collectStringResponse(numCharsWanted,x,respPromptStim,respStim,acceptTextStim,None,window,clickSound,badKeySound,requireAcceptance,autopilot,responseDebug=True)
    print('responses=',responses)
    print('expStop=',expStop,' passThisTrial=',passThisTrial,' responses=',responses, ' responsesAutopilot =', responsesAutopilot)
    print('Finished')
nlloyd/SubliminalCollaborator | libs/twisted/internet/reactor.py | 6 | 1887 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The reactor is the Twisted event loop within Twisted, the loop which drives
applications using Twisted. The reactor provides APIs for networking,
threading, dispatching events, and more.
The default reactor depends on the platform and will be installed if this
module is imported without another reactor being explicitly installed
beforehand. Regardless of which reactor is installed, importing this module is
the correct way to get a reference to it.
New application code should prefer to pass and accept the reactor as a
parameter where it is needed, rather than relying on being able to import this
module to get a reference. This simplifies unit testing and may make it easier
to one day support multiple reactors (as a performance enhancement), though
this is not currently possible.
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
@see: L{IReactorArbitrary<twisted.internet.interfaces.IReactorArbitrary>}
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
"""
import sys
# Remove this module from sys.modules before installing the default reactor:
# install() below registers the concrete reactor object under the
# 'twisted.internet.reactor' name, so future imports of this module return
# the reactor itself rather than this bootstrap module.
del sys.modules['twisted.internet.reactor']
from twisted.internet import default
default.install()
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pyasn1/type/useful.py | 25 | 1159 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ, char, tag
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
# Convenience aliases for the "no value" sentinel machinery defined in univ.
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
    # An ObjectDescriptor is a GraphicString implicitly tagged with
    # universal tag number 7; it inherits its docstring from the base type.
    __doc__ = char.GraphicString.__doc__
    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.GraphicString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
    )
class GeneralizedTime(char.VisibleString):
    # A GeneralizedTime is a VisibleString implicitly tagged with universal
    # tag number 24.  Inherit the docstring from the actual base class: the
    # original copied char.GraphicString.__doc__ here, mislabelling this
    # VisibleString subclass in help() and generated docs.
    __doc__ = char.VisibleString.__doc__
    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.VisibleString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
    )
class UTCTime(char.VisibleString):
    # A UTCTime is a VisibleString implicitly tagged with universal tag
    # number 23.  Inherit the docstring from the actual base class: the
    # original copied char.GraphicString.__doc__ here, mislabelling this
    # VisibleString subclass in help() and generated docs.
    __doc__ = char.VisibleString.__doc__
    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.VisibleString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
    )
| mit |
kevin-coder/tensorflow-fork | tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py | 8 | 7386 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sharded mutable dense hash table (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from tensorflow.contrib import lookup
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
# TODO(rohanj): This should subclass Trackable and implement
# _gather_saveables_for_checkpoint.
class ShardedMutableDenseHashTable(object):
  """A sharded version of MutableDenseHashTable.
  It is designed to be interface compatible with LookupInterface and
  MutableDenseHashTable, with the exception of the export method, which is
  replaced by an export_sharded method.
  The _ShardedMutableDenseHashTable keeps `num_shards` MutableDenseHashTable
  internally. The shard is computed via the modulo operation on the key.
  """
  # TODO(andreasst): consider moving this to lookup module
  @deprecation.deprecated(
      None, 'This class is deprecated. To UPDATE or USE linear optimizers, '
      'please check its latest version in core: '
      'tensorflow_estimator/python/estimator/canned/linear_optimizer/.')
  def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               empty_key,
               deleted_key,
               num_shards=1,
               checkpoint=True,
               name='ShardedMutableHashTable'):
    # Builds one MutableDenseHashTable per shard; a key is routed to shard
    # abs(key) % num_shards (see _shard_indices).
    self._key_dtype = key_dtype
    self._value_dtype = value_dtype
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      self._table_name = scope
      table_shards = []
      for i in range(num_shards):
        # Each shard gets a distinguishing 1-based '<name>-i-of-n' suffix.
        table_shards.append(
            lookup.MutableDenseHashTable(
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                default_value=default_value,
                empty_key=empty_key,
                deleted_key=deleted_key,
                checkpoint=checkpoint,
                name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access
  @property
  def name(self):
    # The name scope under which all shard tables were created.
    return self._table_name
  @property
  def _num_shards(self):
    return len(self._table_shards)
  @property
  def table_shards(self):
    return self._table_shards
  def size(self, name=None):
    # Total number of entries: the sum of the per-shard sizes.
    with ops.name_scope(name, 'sharded_mutable_hash_table_size'):
      sizes = [
          self._table_shards[i].size() for i in range(self._num_shards)
      ]
      return math_ops.add_n(sizes)
  def _shard_indices(self, keys):
    # Maps each key to the index of the shard that owns it.
    # NOTE(review): mod/abs require integer-valued keys — confirm callers
    # only use integer key dtypes.
    key_shape = keys.get_shape()
    if key_shape.ndims > 1:
      # If keys are a matrix (i.e. a single key is a vector), we use the first
      # element of each key vector to determine the shard.
      keys = array_ops.slice(keys, [0, 0], [key_shape.dims[0].value, 1])
      keys = array_ops.reshape(keys, [-1])
    indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
    return math_ops.cast(indices, dtypes.int32)
  def _check_keys(self, keys):
    # Shard routing and re-stitching need a statically known 1-D or 2-D
    # key tensor.
    if not keys.get_shape().is_fully_defined():
      raise ValueError('Key shape must be fully defined, got %s.' %
                       keys.get_shape())
    if keys.get_shape().ndims != 1 and keys.get_shape().ndims != 2:
      raise ValueError('Expected a vector or matrix for keys, got %s.' %
                       keys.get_shape())
  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    if keys.dtype.base_dtype != self._key_dtype:
      raise TypeError('Signature mismatch. Keys must be dtype %s, got %s.' %
                      (self._key_dtype, keys.dtype))
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      # Fast path: no partitioning needed when there is a single shard.
      return self._table_shards[0].lookup(keys, name=name)
    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = [
        self._table_shards[i].lookup(key_shards[i], name=name)
        for i in range(num_shards)
    ]
    # Stitch the per-shard results back into the original key order using
    # the same partitioning applied to the running indices.
    num_keys = keys.get_shape().dims[0]
    original_indices = math_ops.range(num_keys)
    partitioned_indices = data_flow_ops.dynamic_partition(original_indices,
                                                          shard_indices,
                                                          num_shards)
    result = data_flow_ops.dynamic_stitch(partitioned_indices, value_shards)
    result.set_shape(
        tensor_shape.TensorShape([num_keys]).concatenate(self._value_shape))
    return result
  def insert(self, keys, values, name=None):
    """Inserts `keys` in a table."""
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)
    # Partition keys and values identically so pairs stay together, then
    # insert each partition into its shard.
    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]
    # Group the per-shard insert ops into a single op for the caller.
    return control_flow_ops.group(*return_values)
  def export_sharded(self, name=None):
    """Returns lists of the keys and values tensors in the sharded table.
    Args:
      name: name of the table.
    Returns:
      A pair of lists with the first list containing the key tensors and the
      second list containing the value tensors from each shard.
    """
    keys_list = []
    values_list = []
    for table_shard in self._table_shards:
      exported_keys, exported_values = table_shard.export(name=name)
      keys_list.append(exported_keys)
      values_list.append(exported_values)
    return keys_list, values_list
| apache-2.0 |
jacobian-archive/django-buildmaster | djangobotcfg/buildsteps.py | 2 | 6606 | """
Individual custom build steps for the Django tests.
See the docstring in builders.py for an overview of how these all fit together.
I'm using subclasses (instead of just passing arguments) since it makes the
overall build factory in builders.py easier to read. Unfortunately it makes some
of what's going here a bit more confusing. Win some, lose some.
"""
import textwrap
from buildbot.steps.source import SVN
from buildbot.steps.shell import Test, ShellCommand
from buildbot.steps.transfer import FileDownload, StringDownload
from buildbot.process.properties import WithProperties
class DjangoSVN(SVN):
    """Source step that checks Django out of Subversion.

    Django keeps its release branches under ``branches/releases/<name>``,
    so this step maps a plain branch name onto the full repository URL.
    A branch of ``None`` (or ``'trunk'``) selects trunk.
    """

    name = 'svn checkout'

    def __init__(self, branch=None, **kwargs):
        repo_base = 'http://code.djangoproject.com/svn/django/'
        if branch in (None, 'trunk'):
            kwargs['svnurl'] = repo_base + 'trunk'
        else:
            kwargs['svnurl'] = repo_base + 'branches/releases/%s' % branch
        # Always do a clean checkout rather than updating in place.
        kwargs['mode'] = 'clobber'
        SVN.__init__(self, **kwargs)
        self.addFactoryArguments(branch=branch)
class DownloadVirtualenv(FileDownload):
    """Copies virtualenv.py from the build master to the slave.

    NOTE(review): **kwargs is accepted but not forwarded to FileDownload;
    this preserves the original behaviour.
    """

    name = 'virtualenv download'
    flunkOnFailure = True
    haltOnFailure = True

    def __init__(self, **kwargs):
        FileDownload.__init__(self,
                              mastersrc='virtualenv.py',
                              slavedest='virtualenv.py')
class UpdateVirtualenv(ShellCommand):
    """
    Updates (or creates) the virtualenv, installing dependencies as needed.
    """
    name = 'virtualenv setup'
    description = 'updating env'
    descriptionDone = 'updated env'
    flunkOnFailure = True
    haltOnFailure = True
    def __init__(self, python, db, **kwargs):
        # Double string interpolation: the `% python` below is the *first*
        # pass and bakes the python version into the literal, so
        # r'PYTHON=%%(python%s)s;' becomes e.g. 'PYTHON=%(python2.7)s;'.
        # WithProperties then performs the *second* pass at build time,
        # substituting the slave's 'pythonX.Y' build property.
        command = [
            r'PYTHON=%%(python%s)s;' % python,
            r'VENV=../venv-python%s-%s%s;' % (python, db.name, db.version),
            # Create or update the virtualenv
            r'$PYTHON virtualenv.py --distribute --no-site-packages $VENV || exit 1;',
            # Reset $PYTHON and $PIP to the venv python
            r'PYTHON=$PWD/$VENV/bin/python;',
            r'PIP=$PWD/$VENV/bin/pip;',
        ]
        # Commands to install database dependencies if needed.  Each branch
        # probes for an existing driver import before pip-installing.
        if db.name == 'sqlite':
            command.extend([
                r"$PYTHON -c 'import sqlite3' 2>/dev/null || ",
                r"$PYTHON -c 'import pysqlite2.dbapi2' ||",
                r"$PIP install pysqlite || exit 1;",
            ])
        elif db.name == 'postgresql':
            command.append("$PYTHON -c 'import psycopg2' 2>/dev/null || $PIP install psycopg2==2.2.2 || exit 1")
        elif db.name == 'mysql':
            command.append("$PYTHON -c 'import MySQLdb' 2>/dev/null || $PIP install MySQL-python==1.2.3 || exit 1")
        else:
            raise ValueError("Bad DB: %r" % db.name)
        kwargs['command'] = WithProperties("\n".join(command))
        ShellCommand.__init__(self, **kwargs)
        self.addFactoryArguments(python=python, db=db)
class GenerateSettings(StringDownload):
    """
    Generates a testsettings.py and downloads it to the build slave.
    """
    name = 'generate settings'
    def __init__(self, python, db, **kwargs):
        # Dispatch on the database name to get_<name>_settings(); unknown
        # backends are rejected up front with a ValueError.
        try:
            settings = getattr(self, 'get_%s_settings' % db.name)()
        except AttributeError:
            raise ValueError("Bad DB: %r" % db.name)
        kwargs['s'] = settings
        kwargs['slavedest'] = 'testsettings.py'
        StringDownload.__init__(self, **kwargs)
        self.addFactoryArguments(python=python, db=db)
    # Each generator below suffixes test database names with os.getpid() so
    # concurrent builds on the same slave don't collide.
    def get_sqlite_settings(self):
        return textwrap.dedent('''
            import os
            DATABASES = {
                'default': {
                    'ENGINE': 'django.db.backends.sqlite3'
                },
                'other': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'TEST_NAME': 'other_db_%s' % os.getpid(),
                }
            }
        ''')
    def get_postgresql_settings(self):
        return textwrap.dedent('''
            import os
            DATABASES = {
                'default': {
                    'ENGINE': 'django.db.backends.postgresql_psycopg2',
                    'NAME': 'django_buildslave',
                    'HOST': 'localhost',
                    'USER': 'django_buildslave',
                    'PASSWORD': 'django_buildslave',
                    'TEST_NAME': 'django_buildslave_%s' % os.getpid(),
                },
                'other': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'TEST_NAME': 'other_db_%s' % os.getpid(),
                }
            }
        ''')
    def get_mysql_settings(self):
        return textwrap.dedent('''
            import os
            DATABASES = {
                'default': {
                    'ENGINE': 'django.db.backends.mysql',
                    'NAME': 'djbuildslave',
                    'HOST': 'localhost',
                    'USER': 'djbuildslave',
                    'PASSWORD': 'djbuildslave',
                    'TEST_NAME': 'djbuild%s' % os.getpid(),
                },
                'other': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'TEST_NAME': 'other_db_%s' % os.getpid(),
                }
            }
        ''')
class TestDjango(Test):
    """
    Runs Django's tests.
    """
    name = 'test'
    def __init__(self, python, db, verbosity=2, **kwargs):
        # Run runtests.py with the virtualenv python matching this
        # python/db combination (created by UpdateVirtualenv).
        kwargs['command'] = [
            '../venv-python%s-%s%s/bin/python' % (python, db.name, db.version),
            'tests/runtests.py',
            '--settings=testsettings',
            '--verbosity=%s' % verbosity,
        ]
        kwargs['env'] = {
            'PYTHONPATH': '$PWD:$PWD/tests',
            'LC_ALL': 'en_US.utf8',
        }
        Test.__init__(self, **kwargs)
        # Make sure not to spuriously count a warning from test cases
        # using the word "warning". So skip any "warnings" on lines starting
        # with "test_"
        self.addSuppression([(None, "^test_", None, None)])
        self.addFactoryArguments(python=python, db=db, verbosity=verbosity)
rowemoore/odoo | addons/share/ir_model.py | 439 | 2272 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class ir_model_access(osv.Model):
    _inherit = 'ir.model.access'
    # overload group_names_with_access() to avoid returning sharing groups
    # by filtering out groups with share=true.
    def group_names_with_access(self, cr, model_name, access_mode):
        """Returns the names of visible groups which have been granted ``access_mode`` on
        the model ``model_name``.
        :rtype: list
        """
        # access_mode is whitelisted by the assert below before being
        # concatenated into the query, so the string concatenation into
        # 'a.perm_...' is not injectable.
        assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
        cr.execute('''SELECT
                        c.name, g.name
                      FROM
                        ir_model_access a
                        JOIN ir_model m ON (a.model_id=m.id)
                        JOIN res_groups g ON (a.group_id=g.id)
                        LEFT JOIN ir_module_category c ON (c.id=g.category_id)
                      WHERE
                        m.model=%s AND
                        a.active IS true AND
                        (g.share IS NULL or g.share IS false) AND
                        a.perm_''' + access_mode, (model_name,))
        # Prefix each group name with its module category when one exists.
        return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pierrelapointe/scancode-toolkit | tests/cluecode/data/ics/chromium-testing-gmock-test/gmock_test_utils.py | 13 | 1064 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | apache-2.0 |
davidzchen/tensorflow | tensorflow/python/distribute/distribute_config.py | 47 | 1810 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A configure tuple for high-level APIs for running distribution strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
class DistributeConfig(
    collections.namedtuple(
        'DistributeConfig',
        ['train_distribute', 'eval_distribute', 'remote_cluster'])):
  """Configuration bundle for running a distribution strategy.

  Fields:
    train_distribute: a `DistributionStrategy` used for training, or None.
    eval_distribute: an optional `DistributionStrategy` used for evaluation.
    remote_cluster: a dict, `ClusterDef` or `ClusterSpec` describing the
      cluster configuration; when supplied, `train_and_evaluate` runs as a
      standalone client connecting to that cluster for training.
  """

  def __new__(cls,
              train_distribute=None,
              eval_distribute=None,
              remote_cluster=None):
    # Delegate straight to the namedtuple constructor; every field
    # defaults to None.
    fields = (train_distribute, eval_distribute, remote_cluster)
    return super(DistributeConfig, cls).__new__(cls, *fields)
| apache-2.0 |
kennethreitz/lplex | logplex/packages/requests/packages/charade/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
# The tuple of "string-like" types differs between Python 2 and 3; it is
# used by wrap_ord() below for isinstance() checks.
if sys.version_info < (3, 0):
    base_str = (str, unicode)  # noqa: F821 - `unicode` exists on Python 2 only
else:
    base_str = (bytes, str)
def wrap_ord(a):
    """Return ord(a) for a Python 2 string element; pass a through otherwise.

    On Python 3, iterating bytes already yields ints, so no conversion is
    needed (the version check short-circuits before touching base_str).
    """
    needs_ord = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_ord else a
| mit |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/distinct_error.py | 1 | 1155 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Module-level proto descriptor: declares the proto package and marshal
# names, plus the manifest of message types defined in this module.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.errors',
    marshal='google.ads.googleads.v7',
    manifest={
        'DistinctErrorEnum',
    },
)
class DistinctErrorEnum(proto.Message):
    r"""Container for enum describing possible distinct errors. """
    class DistinctError(proto.Enum):
        r"""Enum describing possible distinct errors."""
        # 0/1 are the conventional proto "unspecified"/"unknown" values.
        UNSPECIFIED = 0
        UNKNOWN = 1
        DUPLICATE_ELEMENT = 2
        DUPLICATE_TYPE = 3
# Export exactly the names declared in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
jcmarks/jcmarks-fusion | lib/werkzeug/contrib/wrappers.py | 318 | 10331 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
    """Return True if *charset* names a codec Python can look up."""
    try:
        codecs.lookup(charset)
        return True
    except LookupError:
        return False
class JSONRequestMixin(object):
    """Add json method to a request object. This will parse the input data
    through simplejson if possible.
    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not json or if the data itself cannot be parsed as json.
    """
    @cached_property
    def json(self):
        """Get the result of simplejson.loads if possible."""
        # Substring test: any content type containing 'json' is accepted
        # (e.g. 'application/json', but also 'application/foo+json').
        if 'json' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a JSON request')
        try:
            return loads(self.data)
        except Exception:
            # Report malformed payloads as a client error (400), not a 500.
            raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
    """Add protobuf parsing method to a request object. This will parse the
    input data through `protobuf`_ if possible.
    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.
    .. _protobuf: http://code.google.com/p/protobuf/
    """
    #: by default the :class:`ProtobufRequestMixin` will raise a
    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
    #: initialized.  You can bypass that check by setting this
    #: attribute to `False`.
    protobuf_check_initialization = True
    def parse_protobuf(self, proto_type):
        """Parse the data into an instance of proto_type."""
        # Substring test on the content type, mirroring the JSON mixin.
        if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a Protobuf request')
        obj = proto_type()
        try:
            obj.ParseFromString(self.data)
        except Exception:
            # Any parse failure is reported as a 400 client error.
            raise BadRequest("Unable to parse Protobuf request")
        # Fail if not all required fields are set
        if self.protobuf_check_initialization and not obj.IsInitialized():
            raise BadRequest("Partial Protobuf request")
        return obj
class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.

    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
    """

    def _get_routing_args(self):
        # BUGFIX: the default used to be `(())`, which is just `()`, so the
        # trailing `[0]` raised IndexError for requests without routing
        # args.  Default to `((),)` so such requests yield an empty tuple,
        # matching the documented "positional URL arguments as tuple".
        return self.environ.get('wsgiorg.routing_args', ((),))[0]

    def _set_routing_args(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment. If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)

    routing_args = property(_get_routing_args, _set_routing_args, doc='''
        The positional URL arguments as `tuple`.''')
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        rv = self.environ.get('wsgiorg.routing_args')
        if rv is not None:
            return rv[1]
        rv = {}
        if not self.shallow:
            # Store the fresh dict back into the environ so later mutations
            # of the returned mapping are shared with everyone else reading
            # this request.
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment. If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)

    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
        The keyword URL arguments as `dict`.''')
    del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
    """This mixin reverses the trailing slash behavior of :attr:`script_root`
    and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
    directly on the paths.
    Because it changes the behavior of :class:`Request` this class has to be
    mixed in *before* the actual request class::
        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass
    This example shows the differences (for an application mounted on
    `/application` and the request going to `/application/foo/bar`):
    +---------------+-------------------+---------------------+
    |               | normal behavior   | reverse behavior    |
    +===============+===================+=====================+
    | `script_root` | ``/application``  | ``/application/``   |
    +---------------+-------------------+---------------------+
    | `path`        | ``/foo/bar``      | ``foo/bar``         |
    +---------------+-------------------+---------------------+
    """
    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will not include a leading slash.
        """
        # Decode PATH_INFO with the request's charset, then drop the
        # leading slash (the "reverse" behavior).
        path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
                                   self.charset, self.encoding_errors)
        return path.lstrip('/')
    @cached_property
    def script_root(self):
        """The root path of the script including a trailing slash."""
        # Normalize SCRIPT_NAME to exactly one trailing slash.
        path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
                                   self.charset, self.encoding_errors)
        return path.rstrip('/') + '/'
class DynamicCharsetRequestMixin(object):
    """If this mixin is mixed into a request class it will provide
    a dynamic `charset` attribute.  This means that if the charset is
    transmitted in the content type headers it's used from there.

    Because it changes the behavior of :class:`Request` this class has
    to be mixed in *before* the actual request class::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            pass

    By default the request object assumes that the URL charset is the
    same as the data charset.  If the charset varies on each request
    based on the transmitted data it's not a good idea to let the URLs
    change based on that.  Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out.  It's strongly
    recommended to set the URL charset to utf-8::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            url_charset = 'utf-8'

    .. versionadded:: 0.6
    """

    #: the default charset that is assumed if the content type header
    #: is missing or does not contain a charset parameter.  The default
    #: is latin1, which is what HTTP specifies as default charset.
    #: You may however want to set this to utf-8 to better support
    #: browsers that do not transmit a charset for incoming data.
    default_charset = 'latin1'

    def unknown_charset(self, charset):
        """Called if a charset was provided but is not supported by
        the Python codecs module.  By default latin1 is assumed then
        to not lose any information; you may override this method to
        change the behavior.

        :param charset: the charset that was not found.
        :return: the replacement charset.
        """
        return 'latin1'

    @cached_property
    def charset(self):
        """The charset from the content type, falling back to
        :attr:`default_charset` (and :meth:`unknown_charset` for
        charsets the codecs module does not know)."""
        header = self.environ.get('CONTENT_TYPE')
        if header:
            ct, options = parse_options_header(header)
            charset = options.get('charset')
            if charset:
                if is_known_charset(charset):
                    return charset
                return self.unknown_charset(charset)
        return self.default_charset
class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute.  This means the charset is looked
    up in (and stored into) the `Content-Type` header and updates
    itself automatically.  This also means a small performance hit but
    can be useful if you're working with different charsets on
    responses.

    Because the charset attribute is not a property at class-level, the
    default value is stored in `default_charset`.

    Because it changes the behavior of :class:`Response` this class has
    to be mixed in *before* the actual response class::

        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass

    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = 'utf-8'

    def _get_charset(self):
        # Read the charset parameter out of the Content-Type header,
        # falling back to `default_charset` when header or parameter
        # is missing.
        header = self.headers.get('content-type')
        if header:
            charset = parse_options_header(header)[1].get('charset')
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        # Rewrite the Content-Type header with the new charset
        # parameter; a missing header is an error because there is
        # nowhere to store the charset.
        header = self.headers.get('content-type')
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError('Cannot set charset if Content-Type '
                            'header is missing.')
        options['charset'] = charset
        self.headers['Content-Type'] = dump_options_header(ct, options)

    charset = property(_get_charset, _set_charset, doc="""
        The charset for the response.  It's stored inside the
        Content-Type header as a parameter.""")
    del _get_charset, _set_charset
| apache-2.0 |
cactusbin/nyt | matplotlib/examples/tests/backend_driver.py | 3 | 15228 | #!/usr/bin/env python
from __future__ import print_function, division
"""
This is used to drive many of the examples across the backends, for
regression testing, and comparing backend efficiency.
You can specify the backends to be tested either via the --backends
switch, which takes a comma-separated list, or as separate arguments,
e.g.
python backend_driver.py agg ps
would test the agg and ps backends. If no arguments are given, a
default list of backends will be tested.
Interspersed with the backend arguments can be switches for the Python
interpreter executing the tests. If entering such arguments causes an
option parsing error with the driver script, separate them from driver
switches with a --.
"""
import os
import time
import sys
import glob
from optparse import OptionParser
import matplotlib.rcsetup as rcsetup
from matplotlib.cbook import Bunch, dedent
all_backends = list(rcsetup.all_backends)  # to leave the original list alone

# actual physical directory for each dir
# Maps a short logical name to the example directory it lives in.
dirs = dict(files=os.path.join('..', 'lines_bars_and_markers'),
            shapes=os.path.join('..', 'shapes_and_collections'),
            images=os.path.join('..', 'images_contours_and_fields'),
            pie=os.path.join('..', 'pie_and_polar_charts'),
            text=os.path.join('..', 'text_labels_and_annotations'),
            ticks=os.path.join('..', 'ticks_and_spines'),
            subplots=os.path.join('..', 'subplots_axes_and_figures'),
            specialty=os.path.join('..', 'specialty_plots'),
            showcase=os.path.join('..', 'showcase'),
            pylab = os.path.join('..', 'pylab_examples'),
            api = os.path.join('..', 'api'),
            units = os.path.join('..', 'units'),
            mplot3d = os.path.join('..', 'mplot3d'))

# files in each dir
# NOTE(review): several keys below ('lines', 'colors', 'statistics',
# 'text_labels_and_annotations', 'ticks_and_spines',
# 'subplots_axes_and_figures') have no matching entry in `dirs` above, so
# drive()/report_all_missing() would raise KeyError if asked to test them;
# only the default set (pylab, api, units, mplot3d) lines up -- confirm
# which key spelling is canonical before extending the defaults.
files = dict()
files['lines'] = [
    'barh_demo.py',
    'fill_demo.py',
    'fill_demo_features.py',
    'line_demo_dash_control.py',
    ]

files['shapes'] = [
    'path_patch_demo.py',
    'scatter_demo.py',
    ]

files['colors'] = [
    'color_cycle_demo.py',
    ]

files['images'] = [
    'imshow_demo.py',
    ]

files['statistics'] = [
    'errorbar_demo.py',
    'errorbar_demo_features.py',
    'histogram_demo_features.py',
    ]

files['pie'] = [
    'pie_demo.py',
    'polar_bar_demo.py',
    'polar_scatter_demo.py',
    ]

files['text_labels_and_annotations'] = [
    'text_demo_fontdict.py',
    'unicode_demo.py',
    ]

files['ticks_and_spines'] = [
    'spines_demo_bounds.py',
    'ticklabels_demo_rotation.py',
    ]

files['subplots_axes_and_figures'] = [
    'subplot_demo.py',
    ]

files['showcase'] = [
    'integral_demo.py',
    ]

files['pylab'] = [
    'accented_text.py',
    'alignment_test.py',
    'annotation_demo.py',
    'annotation_demo.py',
    'annotation_demo2.py',
    'annotation_demo2.py',
    'anscombe.py',
    'arctest.py',
    'arrow_demo.py',
    'axes_demo.py',
    'axes_props.py',
    'axhspan_demo.py',
    'axis_equal_demo.py',
    'bar_stacked.py',
    'barb_demo.py',
    'barchart_demo.py',
    'barcode_demo.py',
    'boxplot_demo.py',
    'broken_barh.py',
    'clippedline.py',
    'cohere_demo.py',
    'color_by_yvalue.py',
    'color_demo.py',
    'colorbar_tick_labelling_demo.py',
    'contour_demo.py',
    'contour_image.py',
    'contour_label_demo.py',
    'contourf_demo.py',
    'contourf_log.py',
    'coords_demo.py',
    'coords_report.py',
    'csd_demo.py',
    'cursor_demo.py',
    'custom_cmap.py',
    'custom_figure_class.py',
    'custom_ticker1.py',
    'customize_rc.py',
    'dashpointlabel.py',
    'date_demo1.py',
    'date_demo2.py',
    'date_demo_convert.py',
    'date_demo_rrule.py',
    'date_index_formatter.py',
    'dolphin.py',
    'ellipse_collection.py',
    'ellipse_demo.py',
    'ellipse_rotated.py',
    'equal_aspect_ratio.py',
    'errorbar_limits.py',
    'fancyarrow_demo.py',
    'fancybox_demo.py',
    'fancybox_demo2.py',
    'fancytextbox_demo.py',
    'figimage_demo.py',
    'figlegend_demo.py',
    'figure_title.py',
    'fill_between_demo.py',
    'fill_spiral.py',
    'finance_demo.py',
    'findobj_demo.py',
    'fonts_demo.py',
    'fonts_demo_kw.py',
    'ganged_plots.py',
    'geo_demo.py',
    'gradient_bar.py',
    'griddata_demo.py',
    'hatch_demo.py',
    'hexbin_demo.py',
    'hexbin_demo2.py',
    'hist_colormapped.py',
    'histogram_demo_extended.py',
    'vline_hline_demo.py',
    'image_clip_path.py',
    'image_demo.py',
    'image_demo2.py',
    'image_interp.py',
    'image_masked.py',
    'image_nonuniform.py',
    'image_origin.py',
    'image_slices_viewer.py',
    'interp_demo.py',
    'invert_axes.py',
    'layer_images.py',
    'legend_auto.py',
    'legend_demo.py',
    'legend_demo2.py',
    'legend_demo3.py',
    'legend_scatter.py',
    'line_collection.py',
    'line_collection2.py',
    'line_styles.py',
    'log_bar.py',
    'log_demo.py',
    'log_test.py',
    'major_minor_demo1.py',
    'major_minor_demo2.py',
    'manual_axis.py',
    'masked_demo.py',
    'mathtext_demo.py',
    'mathtext_examples.py',
    'matplotlib_icon.py',
    'matshow.py',
    'mri_demo.py',
    'mri_with_eeg.py',
    'multi_image.py',
    'multiline.py',
    'multiple_figs_demo.py',
    'nan_test.py',
    'newscalarformatter_demo.py',
    'pcolor_demo.py',
    'pcolor_log.py',
    'pcolor_small.py',
    'pie_demo2.py',
    'plotfile_demo.py',
    'polar_demo.py',
    'polar_legend.py',
    'psd_demo.py',
    'psd_demo2.py',
    'psd_demo3.py',
    'quadmesh_demo.py',
    'quiver_demo.py',
    'scatter_custom_symbol.py',
    'scatter_demo2.py',
    'scatter_masked.py',
    'scatter_profile.py',
    'scatter_star_poly.py',
    #'set_and_get.py',
    'shared_axis_across_figures.py',
    'shared_axis_demo.py',
    'simple_plot.py',
    'specgram_demo.py',
    'spine_placement_demo.py',
    'spy_demos.py',
    'stem_plot.py',
    'step_demo.py',
    'stix_fonts_demo.py',
    'stock_demo.py',
    'subplots_adjust.py',
    'symlog_demo.py',
    'table_demo.py',
    'text_handles.py',
    'text_rotation.py',
    'text_rotation_relative_to_line.py',
    'transoffset.py',
    'xcorr_demo.py',
    'zorder_demo.py',
    ]

files['api'] = [
    'agg_oo.py',
    'barchart_demo.py',
    'bbox_intersect.py',
    'collections_demo.py',
    'colorbar_only.py',
    'custom_projection_example.py',
    'custom_scale_example.py',
    'date_demo.py',
    'date_index_formatter.py',
    'donut_demo.py',
    'font_family_rc.py',
    'image_zcoord.py',
    'joinstyle.py',
    'legend_demo.py',
    'line_with_text.py',
    'logo2.py',
    'mathtext_asarray.py',
    'patch_collection.py',
    'quad_bezier.py',
    'scatter_piecharts.py',
    'span_regions.py',
    'two_scales.py',
    'unicode_minus.py',
    'watermark_image.py',
    'watermark_text.py',
    ]

files['units'] = [
    'annotate_with_units.py',
    #'artist_tests.py',  # broken, fixme
    'bar_demo2.py',
    #'bar_unit_demo.py', # broken, fixme
    #'ellipse_with_units.py',  # broken, fixme
    'radian_demo.py',
    'units_sample.py',
    #'units_scatter.py', # broken, fixme
    ]

files['mplot3d'] = [
    '2dcollections3d_demo.py',
    'bars3d_demo.py',
    'contour3d_demo.py',
    'contour3d_demo2.py',
    'contourf3d_demo.py',
    'lines3d_demo.py',
    'polys3d_demo.py',
    'scatter3d_demo.py',
    'surface3d_demo.py',
    'surface3d_demo2.py',
    'text3d_demo.py',
    'wire3d_demo.py',
    ]

# dict from dir to files we know we don't want to test (eg examples
# not using pyplot, examples requiring user input, animation examples,
# examples that may only work in certain environs (usetex examples?),
# examples that generate multiple figures
excluded = {
    'pylab' : ['__init__.py', 'toggle_images.py',],
    'units' : ['__init__.py', 'date_support.py',],
    }
def report_missing(dir, flist):
    'report the py files in dir that are not in flist'
    present = set(os.path.split(full)[-1]
                  for full in glob.glob(os.path.join(dir, '*.py')))
    skipped = set(excluded.get(dir, []))
    untested = sorted(present - set(flist) - skipped)
    if untested:
        print('%s files not tested: %s' % (dir, ', '.join(untested)))
def report_all_missing(directories):
    """Report, for each logical directory name, example files on disk that
    are neither tested nor deliberately excluded.

    BUG FIX: ``excluded`` is keyed by the logical name (e.g. 'pylab') but
    ``report_missing`` receives the physical path (e.g. '../pylab_examples'),
    so its own ``excluded.get(dir, [])`` lookup always came up empty and
    excluded files were wrongly reported as untested.  Fold the exclusions
    into the known-files list here, where the logical key is available.
    """
    for f in directories:
        report_missing(dirs[f], files[f] + excluded.get(f, []))
# tests known to fail on a given backend
# Maps backend name -> tuple of example file names to skip for that backend.
failbackend = dict(
    svg = ('tex_demo.py', ),
    agg = ('hyperlinks.py', ),
    pdf = ('hyperlinks.py', ),
    ps = ('hyperlinks.py', ),
    )
try:
    import subprocess

    def run(arglist):
        """Run *arglist* as a subprocess and return its exit status.

        A KeyboardInterrupt aborts the whole driver rather than just the
        current example.
        """
        try:
            ret = subprocess.call(arglist)
        except KeyboardInterrupt:
            sys.exit()
        else:
            return ret
except ImportError:
    def run(arglist):
        """Fallback when subprocess is unavailable: shell out via os.system.

        BUG FIX: the exit status must be returned -- the original fell off
        the end and returned None, so drive() could never record failures
        on this code path.
        """
        return os.system(' '.join(arglist))
def drive(backend, directories, python=['python'], switches=[]):
    """Run every example in *directories* under *backend*.

    For each example a temporary ``_tmp_<name>.py`` script is generated that
    forces the requested backend and saves (or, for interactive backends,
    shows) the resulting figure, then runs it with *python* + *switches*.
    Returns the list of example paths whose subprocess reported failure.
    """
    exclude = failbackend.get(backend, [])

    # Clear (or create) the per-backend output directory.
    path = backend
    if os.path.exists(path):
        for fname in os.listdir(path):
            os.unlink(os.path.join(path, fname))
    else:
        os.mkdir(backend)
    failures = []

    testcases = [os.path.join(dirs[d], fname)
                 for d in directories
                 for fname in files[d]]

    for fullpath in testcases:
        print('\tdriving %-40s' % fullpath)
        sys.stdout.flush()
        fpath, fname = os.path.split(fullpath)

        if fname in exclude:
            # BUG FIX: the format string has two placeholders; the original
            # supplied only `backend` and raised TypeError here.
            print('\tSkipping %s, known to fail on backend: %s'
                  % (fname, backend))
            continue

        basename, ext = os.path.splitext(fname)
        outfile = os.path.join(path, basename)
        tmpfile_name = '_tmp_%s.py' % basename
        tmpfile = open(tmpfile_name, 'w')

        # First pass over the example: copy leading comment lines (so a
        # shebang/coding line stays first) and detect whether the example
        # needs unicode_literals in its __future__ import.
        future_imports = 'from __future__ import division, print_function'
        with open(fullpath) as src:
            for line in src:
                line_lstrip = line.lstrip()
                if line_lstrip.startswith("#"):
                    tmpfile.write(line)
                elif 'unicode_literals' in line:
                    future_imports = future_imports + ', unicode_literals'

        # Header that pins the backend before pylab/pyplot is imported.
        tmpfile.writelines((
            future_imports + '\n',
            'import sys\n',
            'sys.path.append("%s")\n' % fpath.replace('\\', '\\\\'),
            'import matplotlib\n',
            'matplotlib.use("%s")\n' % backend,
            'from pylab import savefig\n',
            'import numpy\n',
            'numpy.seterr(invalid="ignore")\n',
            ))

        # Second pass: copy the example body, dropping lines the header
        # already provides or that would block/duplicate output.
        with open(fullpath) as src:
            for line in src:
                line_lstrip = line.lstrip()
                if (line_lstrip.startswith('from __future__ import') or
                        line_lstrip.startswith('matplotlib.use') or
                        line_lstrip.startswith('savefig') or
                        line_lstrip.startswith('show')):
                    continue
                tmpfile.write(line)

        if backend in rcsetup.interactive_bk:
            tmpfile.write('show()')
        else:
            tmpfile.write('\nsavefig(r"%s", dpi=150)' % outfile)

        tmpfile.close()
        start_time = time.time()
        program = [x % {'name': basename} for x in python]
        ret = run(program + [tmpfile_name] + switches)
        end_time = time.time()
        print("%s %s" % ((end_time - start_time), ret))
        os.remove(tmpfile_name)
        if ret:
            failures.append(fullpath)
    return failures
def parse_options():
    """Parse the command line and return a Bunch of driver settings."""
    doc = (__doc__ and __doc__.split('\n\n')) or " "
    parser = OptionParser(description=doc[0].strip(),
                          usage='%prog [options] [--] [backends and switches]',
                          #epilog='\n'.join(doc[1:])  # epilog not supported on my python2.4 machine: JDH
                          )
    parser.disable_interspersed_args()
    parser.set_defaults(dirs='pylab,api,units,mplot3d',
                        clean=False, coverage=False, valgrind=False)
    parser.add_option('-d', '--dirs', '--directories', type='string',
                      dest='dirs', help=dedent('''
      Run only the tests in these directories; comma-separated list of
      one or more of: pylab (or pylab_examples), api, units, mplot3d'''))
    parser.add_option('-b', '--backends', type='string', dest='backends',
                      help=dedent('''
      Run tests only for these backends; comma-separated list of
      one or more of: agg, ps, svg, pdf, template, cairo,
      Default is everything except cairo.'''))
    parser.add_option('--clean', action='store_true', dest='clean',
                      help='Remove result directories, run no tests')
    parser.add_option('-c', '--coverage', action='store_true', dest='coverage',
                      help='Run in coverage.py')
    parser.add_option('-v', '--valgrind', action='store_true', dest='valgrind',
                      help='Run in valgrind')

    opts, extra = parser.parse_args()

    # Anything that looks like a switch is forwarded to the interpreter;
    # everything else names a backend.
    switches = []
    backends = []
    for arg in extra:
        if arg.startswith('--'):
            switches.append(arg)
        else:
            backends.append(arg.lower())
    if opts.backends:
        backends.extend(be.lower() for be in opts.backends.split(','))

    chosen_dirs = opts.dirs.split(',')
    # accept the on-disk spelling as an alias for the logical key
    if 'pylab_examples' in chosen_dirs:
        chosen_dirs[chosen_dirs.index('pylab_examples')] = 'pylab'

    return Bunch(
        dirs=chosen_dirs,
        backends=backends or ['agg', 'ps', 'svg', 'pdf', 'template'],
        clean=opts.clean,
        coverage=opts.coverage,
        valgrind=opts.valgrind,
        switches=switches)
if __name__ == '__main__':
    times = {}      # backend name -> elapsed minutes
    failures = {}   # backend name -> list of failed example paths
    options = parse_options()

    if options.clean:
        # --clean: delete per-backend result directories and any leftover
        # generated _tmp*.py scripts, then exit without running tests.
        localdirs = [d for d in glob.glob('*') if os.path.isdir(d)]
        all_backends_set = set(all_backends)
        for d in localdirs:
            if d.lower() not in all_backends_set:
                continue
            print ('removing %s'%d)
            for fname in glob.glob(os.path.join(d, '*')):
                os.remove(fname)
            os.rmdir(d)
        for fname in glob.glob('_tmp*.py'):
            os.remove(fname)
        print ('all clean...')
        raise SystemExit

    # Choose the interpreter command used to run each generated script.
    # NOTE(review): the win32 branch is identical to the final else branch;
    # presumably it once differed -- confirm before collapsing them.
    if options.coverage:
        python = ['coverage.py', '-x']
    elif options.valgrind:
        python = ['valgrind', '--tool=memcheck', '--leak-check=yes',
                  '--log-file=%(name)s', sys.executable]
    elif sys.platform == 'win32':
        python = [sys.executable]
    else:
        python = [sys.executable]

    report_all_missing(options.dirs)
    for backend in options.backends:
        print ('testing %s %s' % (backend, ' '.join(options.switches)))
        t0 = time.time()
        failures[backend] = \
            drive(backend, options.dirs, python, options.switches)
        t1 = time.time()
        times[backend] = (t1-t0)/60.0

    # print times and per-backend failures; compare each backend against
    # the (cheap) template backend when it was run
    for backend, elapsed in times.items():
        print ('Backend %s took %1.2f minutes to complete' % (backend, elapsed))
        failed = failures[backend]
        if failed:
            print ('  Failures: %s' % failed)
        if 'template' in times:
            print ('\ttemplate ratio %1.3f, template residual %1.3f' % (
                elapsed/times['template'], elapsed-times['template']))
| unlicense |
RemoteConnectionManager/RCM_spack_deploy | scripts/test.py | 1 | 2425 | import utils
import os
import logging
#################
if __name__ == '__main__':
    # Smoke-test driver for the project's `utils` module: exercises the
    # introspection helpers, the LinkTree merge, and bare git-repo checkout.
    import tempfile
    import shutil

    print("__file__:" + os.path.realpath(__file__))
    #logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    #logging.debug('This message should appear on the console')
    #logging.info('So should this')
    #logging.warning('And this, too')
    print("########### "+__name__)

    # Configure a dedicated logger; the handler loop only has an effect if
    # handlers were attached elsewhere (none are attached here).
    ll = logging.getLogger('@@'+__name__)
    ll.setLevel(logging.DEBUG)
    for h in ll.handlers :
        h.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(levelname)s %(name)s[%(filename)s:%(lineno)s ] %(message)s")
        h.setFormatter(formatter)

    # Dump system introspection info and host-specific command mappings.
    for k,v in utils.baseintrospect().sysintro.items() : print("sysintro["+ k +"]=" + v )
    me=utils.myintrospect(tags={'calori': 'ws_mint', 'galileo':'galileo', 'marconi':'marconi', 'eni':'eni' })
    for k,v in me.commands.items() : print("commands["+ k +"]=" + v )
    print("myintrospection: host->" + me.platform_tag())

    # Merge the repo-level 'cache' tree into a scratch directory, report how
    # many entries landed there, then clean up.
    root=os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'cache')
    print("root:" + root)
    l = utils.LinkTree(root,maxdepth=2)
    dest=tempfile.mkdtemp()
    l.merge(dest)
    ll.info("Num folders in dest: " + str(len(os.listdir(dest))))
    shutil.rmtree(dest, ignore_errors=True)

    #for h in ll.handlers:
    #    h.setFormatter("[%(filename)s:%(lineno)s - %(funcName)20s() %(asctime)s] %(message)s")
    #    h.setLevel(logging.DEBUG)

    #origin='https://github.com/RemoteConnectionManager/RCM_spack_deploy.git'
    #branches=['master']
    #origin='https://github.com/RemoteConnectionManager/spack.git'
    #branches=['clean/develop']

    # Optional clone smoke test: for each (origin, branches) pair, init a
    # bare repo in a temp dir, fetch the selected branches and check one out.
    # The list is currently empty, so this loop is a no-op by default.
    for origin,branches in [
        #('https://github.com/RemoteConnectionManager/spack.git',['clean/develop']),
        #('https://github.com/RemoteConnectionManager/RCM_spack_deploy.git',['master'])
    ] :
        dest=tempfile.mkdtemp()
        ll.info("creating TEMP dir ->" + dest)
        repo=utils.git_repo(dest,logger=ll)
        origin_branches = utils.get_branches(origin, branch_selection=branches)
        repo.init()
        repo.add_remote(origin, name='origin', fetch_branches=origin_branches)
        repo.fetch(name='origin',branches=origin_branches)
        repo.checkout(origin_branches[0])
        ll.info(os.listdir(dest))
        shutil.rmtree(dest, ignore_errors=True)
| lgpl-3.0 |
TeamExodus/external_chromium_org | third_party/markdown/treeprocessors.py | 104 | 14665 | # markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
    """Build the default treeprocessors for Markdown.

    Returns an ordered dict mapping processor name to instance; order
    matters because "inline" must run before "prettify".
    """
    processors = odict.OrderedDict()
    processors["inline"] = InlineProcessor(md_instance)
    processors["prettify"] = PrettifyTreeprocessor(md_instance)
    return processors
def isString(s):
    """Return True for plain strings; AtomicStrings are deliberately
    excluded so they are never re-processed by inline patterns."""
    return (isinstance(s, util.string_type)
            and not isinstance(s, util.AtomicString))
class Treeprocessor(util.Processor):
    """Base class for tree processors.

    A treeprocessor runs on the ElementTree document before serialization
    and may mutate it (or return a replacement tree).  Concrete processors
    must subclass this and override :meth:`run`.
    """

    def run(self, root):
        """Process the ElementTree rooted at *root*.

        Subclasses either mutate the tree in place and return ``None``, or
        return a new ElementTree object that replaces the existing root.
        """
        pass
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Matched inline content is temporarily replaced by textual placeholders
    (prefix + 4-digit id + suffix) while the corresponding Elements/strings
    are kept in ``self.stashed_nodes``; the placeholders are resolved back
    into tree nodes afterwards.
    """

    def __init__(self, md):
        # Placeholder delimiters and the total length of one placeholder
        # (the 4 accounts for the zero-padded 4-digit id).
        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = util.ETX
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
        self.markdown = md

    def __makePlaceholder(self, type):
        """ Generate a placeholder """
        # NOTE: the `type` argument is accepted but not used here; the id is
        # simply the current size of the stash, zero-padded to 4 digits.
        id = "%04d" % len(self.stashed_nodes)
        hash = util.INLINE_PLACEHOLDER % id
        return hash, id

    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index

        Keyword arguments:

        * data: string
        * index: index, from which we start search

        Returns: placeholder id and string index, after the found placeholder.

        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            # No placeholder found; advance one character past `index`.
            return None, index + 1

    def __stashNode(self, node, type):
        """ Add node to stash """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder

    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders

        Keyword arguments:

        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with

        Returns: String with placeholders.

        """
        # AtomicStrings are left untouched by design (see isString).
        if not isinstance(data, util.AtomicString):
            startIndex = 0
            while patternIndex < len(self.markdown.inlinePatterns):
                # Each pattern is retried from where the last match left off;
                # only when a pattern stops matching do we move to the next.
                data, matched, startIndex = self.__applyPattern(
                    self.markdown.inlinePatterns.value_for_index(patternIndex),
                    data, patternIndex, startIndex)
                if not matched:
                    patternIndex += 1
        return data

    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.

        Keywords arguments:

        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail

        Returns: None

        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None

        childResult = self.__processPlaceholders(text, subnode)

        if not isText and node is not subnode:
            # The resolved nodes replace the tail, so they are inserted as
            # siblings right after `subnode` within `node`.
            pos = node.getchildren().index(subnode)
            node.remove(subnode)
        else:
            pos = 0

        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild)

    def __processPlaceholders(self, data, parent):
        """
        Process string with placeholders and generate ElementTree tree.

        Keyword arguments:

        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data

        Returns: list with ElementTree elements with applied inline patterns.

        """
        def linkText(text):
            # Attach literal text either to the tail of the last resolved
            # element or to the parent's text when nothing was resolved yet.
            if text:
                if result:
                    if result[-1].tail:
                        result[-1].tail += text
                    else:
                        result[-1].tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text

        result = []
        # (sic) 'strartIndex' is the historical spelling used throughout
        # this method.
        strartIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, strartIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)

                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)

                    if index > 0:
                        # Literal text before the placeholder.
                        text = data[strartIndex:index]
                        linkText(text)

                    if not isString(node): # it's Element
                        # Recursively resolve placeholders left inside the
                        # stashed Element's text/tail.
                        for child in [node] + node.getchildren():
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(node, child,False)
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else: # it's just a string
                        linkText(node)
                        strartIndex = phEndIndex
                        continue

                    strartIndex = phEndIndex
                    result.append(node)

                else: # wrong placeholder
                    # Prefix matched but no valid id: keep the prefix as
                    # literal text and resume the scan after it.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[strartIndex:end])
                    strartIndex = end
            else:
                # No more placeholders: the rest is literal text.
                text = data[strartIndex:]
                if isinstance(data, util.AtomicString):
                    # We don't want to loose the AtomicString
                    text = util.AtomicString(text)
                linkText(text)
                data = ""

        return result

    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.

        Keyword arguments:

        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we start searching

        Returns: String with placeholders instead of ElementTree elements.

        """
        match = pattern.getCompiledRegExp().match(data[startIndex:])
        leftData = data[:startIndex]

        if not match:
            return data, False, 0

        node = pattern.handleMatch(match)

        if node is None:
            # Pattern matched but produced nothing: report matched=True and
            # a new start index just past the matched group.
            return data, True, len(leftData)+match.span(len(match.groups()))[0]

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                for child in [node] + node.getchildren():
                    if not isString(node):
                        if child.text:
                            # Child text restarts with the *next* pattern...
                            child.text = self.__handleInline(child.text,
                                                    patternIndex + 1)
                        if child.tail:
                            # ...while tails are reprocessed from the current
                            # pattern.
                            child.tail = self.__handleInline(child.tail,
                                                    patternIndex)

        placeholder = self.__stashNode(node, pattern.type())

        # Reassemble: untouched left part + pre-match group + placeholder +
        # post-match group.
        return "%s%s%s%s" % (leftData,
                             match.group(1),
                             placeholder, match.groups()[-1]), True, 0

    def run(self, tree):
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree.  If you don't
        want to process your data with inline paterns, instead of normal string,
        use subclass AtomicString:

            node.text = markdown.AtomicString("This will not be processed.")

        Arguments:

        * tree: ElementTree object, representing Markdown tree.

        Returns: ElementTree object with applied inline patterns.

        """
        # id -> stashed Element or string, shared by the helpers above.
        self.stashed_nodes = {}

        # Explicit stack instead of recursion; newly created elements are
        # pushed so their own text/tail get processed too.
        stack = [tree]

        while stack:
            currElement = stack.pop()
            insertQueue = []
            for child in currElement.getchildren():
                if child.text and not isinstance(child.text, util.AtomicString):
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(self.__handleInline(
                                                    text), child)
                    stack += lst
                    insertQueue.append((child, lst))
                if child.tail:
                    # Tails are resolved through a dummy element; whatever
                    # text lands on the dummy becomes the new tail, and any
                    # resolved elements are inserted after `child`.
                    tail = self.__handleInline(child.tail)
                    dumby = util.etree.Element('d')
                    tailResult = self.__processPlaceholders(tail, dumby)
                    if dumby.text:
                        child.tail = dumby.text
                    else:
                        child.tail = None
                    pos = currElement.getchildren().index(child) + 1
                    tailResult.reverse()
                    for newChild in tailResult:
                        currElement.insert(pos, newChild)
                if child.getchildren():
                    stack.append(child)

            # Insert the resolved elements, optionally applying the
            # attribute-list extension syntax to their text/tails.
            for element, lst in insertQueue:
                if self.markdown.enable_attributes:
                    if element.text and isString(element.text):
                        element.text = \
                            inlinepatterns.handleAttributes(element.text,
                                                            element)
                i = 0
                for newChild in lst:
                    if self.markdown.enable_attributes:
                        # Processing attributes
                        if newChild.tail and isString(newChild.tail):
                            newChild.tail = \
                                inlinepatterns.handleAttributes(newChild.tail,
                                                                element)
                        if newChild.text and isString(newChild.text):
                            newChild.text = \
                                inlinepatterns.handleAttributes(newChild.text,
                                                                newChild)
                    element.insert(i, newChild)
                    i += 1
        return tree
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """
        i = "\n"
        # <code>/<pre> content is whitespace-sensitive, so their interiors
        # are left alone.
        if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and util.isBlockLevel(elem[0].tag):
                elem.text = i
            for e in elem:
                if util.isBlockLevel(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        # Ensure every element ends with a newline tail unless it already
        # carries meaningful tail text.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """
        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        brs = root.getiterator('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
        # Clean up extra empty lines at end of code blocks.
        pres = root.getiterator('pre')
        for pre in pres:
            if len(pre) and pre[0].tag == 'code':
                pre[0].text = pre[0].text.rstrip() + '\n'
| bsd-3-clause |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/test/test_list.py | 90 | 4222 | import sys
from test import support, list_tests
import pickle
class ListTest(list_tests.CommonTest):
    # Regression tests specific to the built-in `list`; the bulk of the
    # sequence behavior is inherited from list_tests.CommonTest.
    type2test = list

    def test_basic(self):
        # Constructor accepts any iterable and copies (not aliases) lists.
        self.assertEqual(list([]), [])
        l0_3 = [0, 1, 2, 3]
        l0_3_bis = list(l0_3)
        self.assertEqual(l0_3, l0_3_bis)
        self.assertTrue(l0_3 is not l0_3_bis)
        self.assertEqual(list(()), [])
        self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
        self.assertEqual(list(''), [])
        self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])

        if sys.maxsize == 0x7fffffff:
            # This test can currently only work on 32-bit machines.
            # XXX If/when PySequence_Length() returns a ssize_t, it should be
            # XXX re-enabled.
            # Verify clearing of bug #556025.
            # This assumes that the max data size (sys.maxint) == max
            # address size this also assumes that the address size is at
            # least 4 bytes with 8 byte addresses, the bug is not well
            # tested
            #
            # Note: This test is expected to SEGV under Cygwin 1.3.12 or
            # earlier due to a newlib bug.  See the following mailing list
            # thread for the details:

            #     http://sources.redhat.com/ml/newlib/2002/msg00369.html
            self.assertRaises(MemoryError, list, range(sys.maxsize // 2))

        # This code used to segfault in Py2.4a3
        x = []
        x.extend(-y for y in x)
        self.assertEqual(x, [])

    def test_truth(self):
        # Empty list is falsy, non-empty is truthy.
        super().test_truth()
        self.assertTrue(not [])
        self.assertTrue([42])

    def test_identity(self):
        # Two empty list displays must be distinct objects.
        self.assertTrue([] is not [])

    def test_len(self):
        super().test_len()
        self.assertEqual(len([]), 0)
        self.assertEqual(len([0]), 1)
        self.assertEqual(len([0, 1, 2]), 3)

    def test_overflow(self):
        # Repeating a list beyond the address space must raise, not wrap.
        lst = [4, 5, 6, 7]
        n = int((sys.maxsize*2+2) // len(lst))
        def mul(a, b): return a * b
        def imul(a, b): a *= b
        self.assertRaises((MemoryError, OverflowError), mul, lst, n)
        self.assertRaises((MemoryError, OverflowError), imul, lst, n)

    def test_repr_large(self):
        # Check the repr of large list objects
        def check(n):
            l = [0] * n
            s = repr(l)
            self.assertEqual(s,
                '[' + ', '.join(['0'] * n) + ']')
        check(10)       # check our checking code
        check(1000000)

    def test_iterator_pickle(self):
        # Userlist iterators don't support pickling yet since
        # they are based on generators.
        data = self.type2test([4, 5, 6, 7])
        it = itorg = iter(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(self.type2test(it), self.type2test(data))

        # A pickled iterator must resume from its current position.
        it = pickle.loads(d)
        next(it)
        d = pickle.dumps(it)
        self.assertEqual(self.type2test(it), self.type2test(data)[1:])

    def test_reversed_pickle(self):
        data = self.type2test([4, 5, 6, 7])
        it = itorg = reversed(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(self.type2test(it), self.type2test(reversed(data)))

        # Same round-trip check after consuming one item.
        it = pickle.loads(d)
        next(it)
        d = pickle.dumps(it)
        self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:])

    def test_no_comdat_folding(self):
        # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
        # optimization causes failures in code that relies on distinct
        # function addresses.
        class L(list): pass
        with self.assertRaises(TypeError):
            (3,) + L([1,2])
def test_main(verbose=None):
    """Run the list test suite; on debug builds with *verbose*, watch refcounts."""
    support.run_unittest(ListTest)
    # Reference-leak check: on debug builds (which expose gettotalrefcount),
    # re-run the suite several times and record the total refcount after each
    # pass so a steadily growing number reveals a leak.
    import sys
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = []
        for _ in range(5):
            support.run_unittest(ListTest)
            gc.collect()
            counts.append(sys.gettotalrefcount())
        print(counts)
if __name__ == "__main__":
    test_main(verbose=True)
| gpl-2.0 |
horance-liu/tensorflow | tensorflow/contrib/sparsemax/python/ops/sparsemax_loss.py | 103 | 2246 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax Loss op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = ["sparsemax_loss"]
def sparsemax_loss(logits, sparsemax, labels, name=None):
  """Computes the sparsemax loss function [1].

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    sparsemax: A `Tensor`. Must have the same type as `logits`.
    labels: A `Tensor`. Must have the same type as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  with ops.name_scope(name, "sparsemax_loss",
                      [logits, sparsemax, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
    labels = ops.convert_to_tensor(labels, name="labels")

    # Center the logits per row for numerical stability.
    row_mean = math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]
    shifted_logits = logits - row_mean

    # Sum restricted to the support of the sparsemax output (entries > 0).
    support = math_ops.cast(sparsemax > 0, sparsemax.dtype)
    sum_s = support * sparsemax * (shifted_logits - 0.5 * sparsemax)

    # -z_k + ||q||^2 contribution from the true labels.
    q_part = labels * (0.5 * labels - shifted_logits)

    return math_ops.reduce_sum(sum_s + q_part, axis=1)
| apache-2.0 |
rrrene/django | django/core/management/commands/runserver.py | 203 | 7383 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
    """Management command that runs the lightweight development HTTP server."""
    help = "Starts a lightweight Web server for development."
    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False
    leave_locale_alone = True
    default_port = '8000'
    def add_arguments(self, parser):
        # CLI surface: optional [addr:]port plus IPv6/threading/reload toggles.
        parser.add_argument('addrport', nargs='?',
            help='Optional port number, or ipaddr:port')
        parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.')
        parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
            help='Tells Django to NOT use threading.')
        parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.')
    def execute(self, *args, **options):
        if options.get('no_color'):
            # We rely on the environment because it's currently the only
            # way to reach WSGIRequestHandler. This seems an acceptable
            # compromise considering `runserver` runs indefinitely.
            os.environ[str("DJANGO_COLORS")] = str("nocolor")
        super(Command, self).execute(*args, **options)
    def get_handler(self, *args, **options):
        """
        Returns the default WSGI handler for the runner.
        """
        return get_internal_wsgi_application()
    def handle(self, *args, **options):
        # Parse and validate the address/port, then hand off to run().
        from django.conf import settings
        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not options.get('addrport'):
            self.addr = ''
            self.port = self.default_port
        else:
            m = re.match(naiveip_re, options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from an IPv6 literal.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            # Default to the loopback address of the selected IP family.
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
            self._raw_ipv6 = bool(self.use_ipv6)
        self.run(**options)
    def run(self, **options):
        """
        Runs the server, using the autoreloader if needed
        """
        use_reloader = options.get('use_reloader')
        if use_reloader:
            autoreload.main(self.inner_run, None, options)
        else:
            self.inner_run(None, **options)
    def inner_run(self, *args, **options):
        # If an exception was silenced in ManagementUtility.execute in order
        # to be raised in the child process, raise it now.
        autoreload.raise_last_exception()
        threading = options.get('use_threading')
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
        self.stdout.write("Performing system checks...\n\n")
        self.check(display_num_errors=True)
        self.check_migrations()
        now = datetime.now().strftime('%B %d, %Y - %X')
        if six.PY2:
            now = now.decode(get_system_encoding())
        self.stdout.write(now)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = force_text(e)
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)
    def check_migrations(self):
        """
        Checks to see if the set of migrations on disk matches the
        migrations in the database. Prints a warning if they don't match.
        """
        try:
            executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
        except ImproperlyConfigured:
            # No databases are configured (or the dummy one)
            return
        except MigrationSchemaMissing:
            self.stdout.write(self.style.NOTICE(
                "\nNot checking migrations as it is not possible to access/create the django_migrations table."
            ))
            return
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        if plan:
            self.stdout.write(self.style.NOTICE(
                "\nYou have unapplied migrations; your app may not work properly until they are applied."
            ))
            self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
rebstar6/servo | tests/wpt/web-platform-tests/referrer-policy/generic/tools/spec_validator.py | 326 | 6673 | #!/usr/bin/env python
import json, sys
from common_paths import *
def assert_non_empty_string(obj, field):
    # Field must exist, be a string (Python 2 ``basestring``), and be non-empty.
    assert field in obj, 'Missing field "%s"' % field
    assert isinstance(obj[field], basestring), \
        'Field "%s" must be a string' % field
    assert len(obj[field]) > 0, 'Field "%s" must not be empty' % field
def assert_non_empty_list(obj, field):
    """Assert that obj[field] is a list containing at least one element."""
    value = obj[field]
    assert isinstance(value, list), '%s must be a list' % field
    assert len(value) > 0, '%s list must not be empty' % field
def assert_non_empty_dict(obj, field):
    """Assert that obj[field] is a dict containing at least one entry."""
    value = obj[field]
    assert isinstance(value, dict), '%s must be a dict' % field
    assert len(value) > 0, '%s dict must not be empty' % field
def assert_contains(obj, field):
    """Assert that *field* is present in *obj*."""
    assert field in obj, 'Must contain field "%s"' % field
def assert_value_from(obj, field, items):
    """Assert that obj[field] is one of the allowed *items*."""
    message = 'Field "%s" must be from: %s' % (field, str(items))
    assert obj[field] in items, message
def assert_atom_or_list_items_from(obj, field, items):
    # A field may hold a single atom (string/int) or a list of atoms.
    # Atoms are validated against *items* (callers pass ['*'] + schema, so a
    # bare wildcard is accepted); lists are validated element-wise and must
    # not contain the wildcard '*'.
    if isinstance(obj[field], basestring) or isinstance(obj[field], int):
        assert_value_from(obj, field, items)
        return
    assert_non_empty_list(obj, field)
    for allowed_value in obj[field]:
        assert allowed_value != '*', "Wildcard is not supported for lists!"
        assert allowed_value in items, \
            'Field "%s" must be from: %s' % (field, str(items))
def assert_contains_only_fields(obj, expected_fields):
    """Assert that *obj* has every field in *expected_fields* and no others."""
    for required_field in expected_fields:
        assert required_field in obj, 'Must contain field "%s"' % required_field
    for present_field in obj:
        assert present_field in expected_fields, \
            'Unexpected field "%s".' % present_field
def assert_value_unique_in(value, used_values):
    """Assert *value* has not been seen before, then record it in *used_values*."""
    assert value not in used_values, 'Duplicate value "%s"!' % str(value)
    used_values[value] = True
def validate(spec_json, details):
    """ Validates the json specification for generating tests.

    Raises AssertionError on the first problem found. *details* is updated
    in place so the caller can report which object was being inspected when
    validation failed.
    """
    details['object'] = spec_json
    assert_contains_only_fields(spec_json, ["specification",
                                            "referrer_policy_schema",
                                            "test_expansion_schema",
                                            "subresource_path",
                                            "excluded_tests"])
    assert_non_empty_list(spec_json, "specification")
    assert_non_empty_list(spec_json, "referrer_policy_schema")
    assert_non_empty_dict(spec_json, "test_expansion_schema")
    assert_non_empty_list(spec_json, "excluded_tests")
    specification = spec_json['specification']
    referrer_policy_schema = spec_json['referrer_policy_schema']
    test_expansion_schema = spec_json['test_expansion_schema']
    excluded_tests = spec_json['excluded_tests']
    subresource_path = spec_json['subresource_path']
    # list() keeps this working on both Python 2 and Python 3.
    valid_test_expansion_fields = ['name'] + list(test_expansion_schema.keys())
    # Validate each single spec.
    for spec in specification:
        details['object'] = spec
        # Validate required fields for a single spec.
        assert_contains_only_fields(spec, ['name',
                                           'title',
                                           'description',
                                           'referrer_policy',
                                           'specification_url',
                                           'test_expansion'])
        assert_non_empty_string(spec, 'name')
        assert_non_empty_string(spec, 'title')
        assert_non_empty_string(spec, 'description')
        assert_non_empty_string(spec, 'specification_url')
        assert_value_from(spec, 'referrer_policy', referrer_policy_schema)
        assert_non_empty_list(spec, 'test_expansion')
        # Validate spec's test expansion.
        used_spec_names = {}
        for spec_exp in spec['test_expansion']:
            details['object'] = spec_exp
            assert_non_empty_string(spec_exp, 'name')
            # The name is unique in same expansion group.
            assert_value_unique_in((spec_exp['expansion'], spec_exp['name']),
                                   used_spec_names)
            assert_contains_only_fields(spec_exp, valid_test_expansion_fields)
            for artifact in test_expansion_schema:
                details['test_expansion_field'] = artifact
                assert_atom_or_list_items_from(
                    spec_exp, artifact, ['*'] + test_expansion_schema[artifact])
                del details['test_expansion_field']
    # Validate the test_expansion schema members.
    details['object'] = test_expansion_schema
    assert_contains_only_fields(test_expansion_schema, ['expansion',
                                                        'delivery_method',
                                                        'redirection',
                                                        'origin',
                                                        'source_protocol',
                                                        'target_protocol',
                                                        'subresource',
                                                        'referrer_url'])
    # Validate excluded tests.
    details['object'] = excluded_tests
    for excluded_test_expansion in excluded_tests:
        assert_contains_only_fields(excluded_test_expansion,
                                    valid_test_expansion_fields)
        details['object'] = excluded_test_expansion
        for artifact in test_expansion_schema:
            details['test_expansion_field'] = artifact
            assert_atom_or_list_items_from(
                excluded_test_expansion,
                artifact,
                ['*'] + test_expansion_schema[artifact])
            del details['test_expansion_field']
    # Validate subresource paths.
    details['object'] = subresource_path
    assert_contains_only_fields(subresource_path,
                                test_expansion_schema['subresource'])
    for subresource in subresource_path:
        local_rel_path = "." + subresource_path[subresource]
        full_path = os.path.join(test_root_directory, local_rel_path)
        # Bug fix: the message previously referenced the undefined name
        # ``path``, raising NameError instead of the intended AssertionError.
        assert os.path.isfile(full_path), "%s is not an existing file" % full_path
    del details['object']
def assert_valid_spec_json(spec_json):
    # Validate the spec; on failure print the assertion message and the
    # object being inspected, then exit non-zero (Python 2 syntax).
    error_details = {}
    try:
        validate(spec_json, error_details)
    except AssertionError, err:
        print 'ERROR:', err.message
        print json.dumps(error_details, indent=4)
        sys.exit(1)
def main():
    # Entry point: load the spec JSON from disk and validate it.
    spec_json = load_spec_json();
    assert_valid_spec_json(spec_json)
    print "Spec JSON is valid."
if __name__ == '__main__':
    main()
| mpl-2.0 |
jyotikamboj/container | django/views/i18n.py | 35 | 11024 | import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language, LANGUAGE_SESSION_KEY
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
    """
    Redirect to a given URL while storing the chosen language in the session
    or in a cookie. The URL and the language code are taken from the request
    parameters.

    Since this view changes how the user will see the rest of the site, it
    must only be accessed as a POST request. If called as a GET request, it
    redirects to the page given in the 'next' parameter without changing any
    state.
    """
    # Pick a safe redirect target: explicit 'next' parameter first, then the
    # referer, and finally the site root.
    redirect_to = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=redirect_to, host=request.get_host()):
        redirect_to = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=redirect_to, host=request.get_host()):
            redirect_to = '/'
    response = http.HttpResponseRedirect(redirect_to)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # Prefer the session when available; otherwise persist the choice
            # in a cookie on the redirect response.
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            else:
                response.set_cookie(
                    settings.LANGUAGE_COOKIE_NAME, lang_code,
                    max_age=settings.LANGUAGE_COOKIE_AGE,
                    path=settings.LANGUAGE_COOKIE_PATH,
                    domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
def get_formats():
    """
    Return all format strings required for i18n to work.
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    raw_formats = {}
    # NOTE: ``module`` itself is not referenced in the loop body; each pass
    # (re)fills every entry via get_format(), preserving the original
    # per-(module, setting) call pattern.
    for module in [settings] + get_format_modules(reverse=True):
        for attr in FORMAT_SETTINGS:
            raw_formats[attr] = get_format(attr)
    # Coerce everything to text: scalars become single strings, sequences
    # become lists of strings; other value types are dropped.
    formats = {}
    for name, value in raw_formats.items():
        if isinstance(value, (six.string_types, int)):
            formats[name] = smart_text(value)
        elif isinstance(value, (tuple, list)):
            formats[name] = [smart_text(item) for item in value]
    return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render the JavaScript i18n template with *catalog* and *plural*."""
    def indent(s):
        # Indent nested JSON so it lines up inside the generated script.
        return s.replace('\n', '\n  ')
    template = Template(js_catalog_template)
    context = Context({
        'catalog_str': indent(json.dumps(
            catalog, sort_keys=True, indent=2)) if catalog else None,
        'formats_str': indent(json.dumps(
            get_formats(), sort_keys=True, indent=2)),
        'plural': plural,
    })
    return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
    """
    Return a ``(catalog, plural)`` pair for *locale* and *domain*.

    The catalogs of the requested *packages* are merged with fallbacks in
    order: English defaults first, then settings.LANGUAGE_CODE, then the
    selected locale. ``plural`` is the plural expression extracted from the
    catalog's Plural-Forms header, or None.
    """
    default_locale = to_locale(settings.LANGUAGE_CODE)
    app_configs = apps.get_app_configs()
    allowable_packages = set(app_config.name for app_config in app_configs)
    allowable_packages.add('django.conf')
    # Only serve catalogs for installed apps; ignore anything else requested.
    packages = [p for p in packages if p in allowable_packages]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        # The empty msgid holds the catalog metadata headers.
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
        #   n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            # Plural entry: the key is a (msgid, plural_index) tuple.
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # Bug fix: use maxcnts[k] (this msgid's own maximum plural index)
        # instead of the stale loop variable ``msgid`` left over from the
        # previous loop, which produced wrong-length plural lists for every
        # msgid except the last one iterated above.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Return "identity" versions of the JavaScript i18n functions, i.e.
    versions that don't actually translate anything.
    """
    # Rendering with no catalog and no plural rule makes the template emit
    # its pass-through gettext implementations.
    return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Return the selected language catalog as a JavaScript library.

    The list of packages to check for translations comes either from the
    ``packages`` argument or as a '+'-delimited string. It defaults to
    'django.conf'. The gettext *domain* can be overridden, but JavaScript
    messages normally live in the 'djangojs' domain.
    """
    locale = to_locale(get_language())
    # An explicit ?language=... query parameter overrides the active language.
    if request.GET and 'language' in request.GET:
        requested = request.GET['language']
        if check_for_language(requested):
            locale = to_locale(requested)
    if packages is None:
        packages = ['django.conf']
    elif isinstance(packages, six.string_types):
        packages = packages.split('+')
    catalog, plural = get_javascript_catalog(locale, domain, packages)
    return render_javascript_catalog(catalog, plural)
| mit |
boehm-s/dotfiles | .emacs.d/elpy/rpc-venv/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py | 5 | 36263 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url
if six.PY2:
# Queue is imported for side effects on MS Windows
import Queue as _unused_module_Queue # noqa: F401
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """
    scheme = None
    QueueCls = queue.LifoQueue
    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")
        # Normalize IPv6 literals and lowercase the host for matching.
        self.host = _ipv6_host(host).lower()
        self._proxy_host = host.lower()
        self.port = port
    def __str__(self):
        return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Returning False re-raises any exception from the ``with`` block.
        return False
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 **conn_kw):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        # Accept a bare int/float and normalize it into a Timeout object.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)
        if retries is None:
            retries = Retry.DEFAULT
        self.timeout = timeout
        self.retries = retries
        self.pool = self.QueueCls(maxsize)
        self.block = block
        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw
        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTP connection (%d): %s",
self.num_connections, self.host)
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError: # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")
        except queue.Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, 'auto_open', 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None
        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s",
                self.host)
        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()
def _validate_conn(self, conn):
    """
    Called right before a request is made, after the socket is created.

    Hook for subclasses; the base implementation does nothing. (The
    HTTPS pool overrides this to force an early connect and warn on
    unverified connections.)
    """
    pass
def _prepare_proxy(self, conn):
    # Nothing to do for plain HTTP connections; the HTTPS pool overrides
    # this to establish the CONNECT tunnel before the first request.
    pass
def _get_timeout(self, timeout):
    """Helper that always returns a :class:`urllib3.util.Timeout`."""
    if timeout is _Default:
        # No per-request override: hand back a copy of the pool default.
        return self.timeout.clone()
    if isinstance(timeout, Timeout):
        return timeout.clone()
    # A bare int/float sets both connect and read timeouts. This is for
    # backwards compatibility and can be removed later.
    return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
    """
    Re-raise ``err`` as a :class:`ReadTimeoutError` if it looks like a
    read timeout; otherwise return so the caller can re-raise the
    original exception itself.
    """
    if isinstance(err, SocketTimeout):
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

    # See the above comment about EAGAIN in Python 3. In Python 2 we have
    # to specifically catch it and throw the timeout error
    if hasattr(err, 'errno') and err.errno in _blocking_errnos:
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

    # Catch possible read timeouts thrown as SSL errors. If not the
    # case, rethrow the original. We need to do this because of:
    # http://bugs.python.org/issue10272
    if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
                  **httplib_request_kw):
    """
    Perform a request on a given urllib connection object taken from our
    pool.

    :param conn:
        a connection from one of our connection pools

    :param timeout:
        Socket timeout in seconds for the request. This can be a
        float or integer, which will set the same timeout value for
        the socket connect and the socket read, or an instance of
        :class:`urllib3.util.Timeout`, which gives you more fine-grained
        control over your timeouts.
    """
    self.num_requests += 1

    timeout_obj = self._get_timeout(timeout)
    timeout_obj.start_connect()
    conn.timeout = timeout_obj.connect_timeout

    # Trigger any extra validation we need to do.
    try:
        self._validate_conn(conn)
    except (SocketTimeout, BaseSSLError) as e:
        # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
        # _raise_timeout only raises when the error was a timeout; the
        # trailing ``raise`` re-raises the original otherwise.
        self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
        raise

    # conn.request() calls httplib.*.request, not the method in
    # urllib3.request. It also calls makefile (recv) on the socket.
    if chunked:
        conn.request_chunked(method, url, **httplib_request_kw)
    else:
        conn.request(method, url, **httplib_request_kw)

    # Reset the timeout for the recv() on the socket
    read_timeout = timeout_obj.read_timeout

    # App Engine doesn't have a sock attr
    if getattr(conn, 'sock', None):
        # In Python 3 socket.py will catch EAGAIN and return None when you
        # try and read into the file pointer created by http.client, which
        # instead raises a BadStatusLine exception. Instead of catching
        # the exception and assuming all BadStatusLine exceptions are read
        # timeouts, check for a zero timeout before making the request.
        if read_timeout == 0:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % read_timeout)
        if read_timeout is Timeout.DEFAULT_TIMEOUT:
            conn.sock.settimeout(socket.getdefaulttimeout())
        else:  # None or a value
            conn.sock.settimeout(read_timeout)

    # Receive the response from the server
    try:
        try:  # Python 2.7, use buffering of HTTP responses
            httplib_response = conn.getresponse(buffering=True)
        except TypeError:  # Python 2.6 and older, Python 3
            try:
                httplib_response = conn.getresponse()
            except Exception as e:
                # Remove the TypeError from the exception chain in Python 3;
                # otherwise it looks like a programming error was the cause.
                six.raise_from(e, None)
    except (SocketTimeout, BaseSSLError, SocketError) as e:
        self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
        raise

    # AppEngine doesn't have a version attr.
    http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
    log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
              method, url, http_version, httplib_response.status,
              httplib_response.length)

    # Warn (but don't fail) on malformed response headers.
    try:
        assert_header_parsing(httplib_response.msg)
    except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
        log.warning(
            'Failed to parse headers (url=%s): %s',
            self._absolute_url(url), hpe, exc_info=True)

    return httplib_response
def _absolute_url(self, path):
    """Build the absolute URL string for ``path`` on this pool's host."""
    absolute = Url(scheme=self.scheme, host=self.host, port=self.port, path=path)
    return absolute.url
def close(self):
    """
    Close all pooled connections and disable the pool.
    """
    # Detach the queue first so concurrent callers see a closed pool.
    drained, self.pool = self.pool, None

    try:
        while True:
            leftover = drained.get(block=False)
            if leftover:
                leftover.close()
    except queue.Empty:
        # Queue exhausted -- every pooled connection has been closed.
        pass
def is_same_host(self, url):
    """
    Check if the given ``url`` is a member of the same host as this
    connection pool.
    """
    # A relative URL (bare path) is by definition on this host.
    if url.startswith('/'):
        return True

    # TODO: Add optional support for socket.gethostbyname checking.
    scheme, host, port = get_host(url)

    # Normalize bracketed IPv6 literals and case before comparing.
    host = _ipv6_host(host).lower()

    # Use explicit default port for comparison when none is given
    if self.port and not port:
        port = port_by_scheme.get(scheme)
    elif not self.port and port == port_by_scheme.get(scheme):
        port = None

    return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
            redirect=True, assert_same_host=True, timeout=_Default,
            pool_timeout=None, release_conn=None, chunked=False,
            body_pos=None, **response_kw):
    """
    Get a connection from the pool and perform an HTTP request. This is the
    lowest level call for making a request, so you'll need to specify all
    the raw details.

    .. note::

       More commonly, it's appropriate to use a convenience method provided
       by :class:`.RequestMethods`, such as :meth:`request`.

    .. note::

       `release_conn` will only behave as expected if
       `preload_content=False` because we want to make
       `preload_content=False` the default behaviour someday soon without
       breaking backwards compatibility.

    :param method:
        HTTP request method (such as GET, POST, PUT, etc.)

    :param body:
        Data to send in the request body (useful for creating
        POST requests, see HTTPConnectionPool.post_url for
        more convenience).

    :param headers:
        Dictionary of custom headers to send, such as User-Agent,
        If-None-Match, etc. If None, pool headers are used. If provided,
        these headers completely replace any pool-specific headers.

    :param retries:
        Configure the number of retries to allow before raising a
        :class:`~urllib3.exceptions.MaxRetryError` exception.

        Pass ``None`` to retry until you receive a response. Pass a
        :class:`~urllib3.util.retry.Retry` object for fine-grained control
        over different types of retries.
        Pass an integer number to retry connection errors that many times,
        but no other types of errors. Pass zero to never retry.

        If ``False``, then retries are disabled and any exception is raised
        immediately. Also, instead of raising a MaxRetryError on redirects,
        the redirect response will be returned.

    :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

    :param redirect:
        If True, automatically handle redirects (status codes 301, 302,
        303, 307, 308). Each redirect counts as a retry. Disabling retries
        will disable redirect, too.

    :param assert_same_host:
        If ``True``, will make sure that the host of the pool requests is
        consistent else will raise HostChangedError. When False, you can
        use the pool on an HTTP proxy and request foreign hosts.

    :param timeout:
        If specified, overrides the default timeout for this one
        request. It may be a float (in seconds) or an instance of
        :class:`urllib3.util.Timeout`.

    :param pool_timeout:
        If set and the pool is set to block=True, then this method will
        block for ``pool_timeout`` seconds and raise EmptyPoolError if no
        connection is available within the time period.

    :param release_conn:
        If False, then the urlopen call will not release the connection
        back into the pool once a response is received (but will release if
        you read the entire contents of the response such as when
        `preload_content=True`). This is useful if you're not preloading
        the response's content immediately. You will need to call
        ``r.release_conn()`` on the response ``r`` to return the connection
        back into the pool. If None, it takes the value of
        ``response_kw.get('preload_content', True)``.

    :param chunked:
        If True, urllib3 will send the body using chunked transfer
        encoding. Otherwise, urllib3 will send the body using the standard
        content-length form. Defaults to False.

    :param int body_pos:
        Position to seek to in file-like body in the event of a retry or
        redirect. Typically this won't need to be set because urllib3 will
        auto-populate the value when needed.

    :param \\**response_kw:
        Additional parameters are passed to
        :meth:`urllib3.response.HTTPResponse.from_httplib`
    """
    if headers is None:
        headers = self.headers

    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

    if release_conn is None:
        release_conn = response_kw.get('preload_content', True)

    # Check host
    if assert_same_host and not self.is_same_host(url):
        raise HostChangedError(self, url, retries)

    conn = None

    # Track whether `conn` needs to be released before
    # returning/raising/recursing. Update this variable if necessary, and
    # leave `release_conn` constant throughout the function. That way, if
    # the function recurses, the original value of `release_conn` will be
    # passed down into the recursive call, and its value will be respected.
    #
    # See issue #651 [1] for details.
    #
    # [1] <https://github.com/shazow/urllib3/issues/651>
    release_this_conn = release_conn

    # Merge the proxy headers. Only do this in HTTP. We have to copy the
    # headers dict so we can safely change it without those changes being
    # reflected in anyone else's copy.
    if self.scheme == 'http':
        headers = headers.copy()
        headers.update(self.proxy_headers)

    # Must keep the exception bound to a separate variable or else Python 3
    # complains about UnboundLocalError.
    err = None

    # Keep track of whether we cleanly exited the except block. This
    # ensures we do proper cleanup in finally.
    clean_exit = False

    # Rewind body position, if needed. Record current position
    # for future rewinds in the event of a redirect/retry.
    body_pos = set_file_position(body, body_pos)

    try:
        # Request a connection from the queue.
        timeout_obj = self._get_timeout(timeout)
        conn = self._get_conn(timeout=pool_timeout)

        conn.timeout = timeout_obj.connect_timeout

        # A proxied connection without a socket yet still needs its
        # tunnel established.
        is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
        if is_new_proxy_conn:
            self._prepare_proxy(conn)

        # Make the request on the httplib connection object.
        httplib_response = self._make_request(conn, method, url,
                                              timeout=timeout_obj,
                                              body=body, headers=headers,
                                              chunked=chunked)

        # If we're going to release the connection in ``finally:``, then
        # the response doesn't need to know about the connection. Otherwise
        # it will also try to release it and we'll have a double-release
        # mess.
        response_conn = conn if not release_conn else None

        # Pass method to Response for length checking
        response_kw['request_method'] = method

        # Import httplib's response into our own wrapper object
        response = self.ResponseCls.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 retries=retries,
                                                 **response_kw)

        # Everything went great!
        clean_exit = True

    except queue.Empty:
        # Timed out by queue.
        raise EmptyPoolError(self, "No pool connections are available.")

    except (TimeoutError, HTTPException, SocketError, ProtocolError,
            BaseSSLError, SSLError, CertificateError) as e:
        # Discard the connection for these exceptions. It will be
        # replaced during the next _get_conn() call.
        clean_exit = False
        if isinstance(e, (BaseSSLError, CertificateError)):
            e = SSLError(e)
        elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
            e = ProxyError('Cannot connect to proxy.', e)
        elif isinstance(e, (SocketError, HTTPException)):
            e = ProtocolError('Connection aborted.', e)

        # increment() re-raises (via MaxRetryError) once retries are
        # exhausted; otherwise we fall through and retry below.
        retries = retries.increment(method, url, error=e, _pool=self,
                                    _stacktrace=sys.exc_info()[2])
        retries.sleep()

        # Keep track of the error for the retry warning.
        err = e

    finally:
        if not clean_exit:
            # We hit some kind of exception, handled or otherwise. We need
            # to throw the connection away unless explicitly told not to.
            # Close the connection, set the variable to None, and make sure
            # we put the None back in the pool to avoid leaking it.
            conn = conn and conn.close()
            release_this_conn = True

        if release_this_conn:
            # Put the connection back to be reused. If the connection is
            # expired then it will be None, which will get replaced with a
            # fresh connection during _get_conn.
            self._put_conn(conn)

    if not conn:
        # Try again
        log.warning("Retrying (%r) after connection "
                    "broken by '%r': %s", retries, err, url)
        return self.urlopen(method, url, body, headers, retries,
                            redirect, assert_same_host,
                            timeout=timeout, pool_timeout=pool_timeout,
                            release_conn=release_conn, body_pos=body_pos,
                            **response_kw)

    def drain_and_release_conn(response):
        """Consume the remaining body so the connection returns to the pool."""
        try:
            # discard any remaining response body, the connection will be
            # released back to the pool once the entire response is read
            response.read()
        except (TimeoutError, HTTPException, SocketError, ProtocolError,
                BaseSSLError, SSLError) as e:
            pass

    # Handle redirect?
    redirect_location = redirect and response.get_redirect_location()
    if redirect_location:
        # 303 See Other requires the follow-up request to be a GET.
        if response.status == 303:
            method = 'GET'

        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_redirect:
                # Drain and release the connection for this response, since
                # we're not returning it to be released manually.
                drain_and_release_conn(response)
                raise
            return response

        # drain and return the connection to the pool before recursing
        drain_and_release_conn(response)

        retries.sleep_for_retry(response)
        log.debug("Redirecting %s -> %s", url, redirect_location)
        return self.urlopen(
            method, redirect_location, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn, body_pos=body_pos,
            **response_kw)

    # Check if we should retry the HTTP response.
    has_retry_after = bool(response.getheader('Retry-After'))
    if retries.is_retry(method, response.status, has_retry_after):
        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_status:
                # Drain and release the connection for this response, since
                # we're not returning it to be released manually.
                drain_and_release_conn(response)
                raise
            return response

        # drain and return the connection to the pool before recursing
        drain_and_release_conn(response)

        retries.sleep(response)
        log.debug("Retry: %s", url)
        return self.urlopen(
            method, url, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn,
            body_pos=body_pos, **response_kw)

    return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.

    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
    available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
                 block=False, headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None, **conn_kw):

        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, retries, _proxy, _proxy_headers,
                                    **conn_kw)

        # Supplying a CA bundle implies the caller wants verification.
        if ca_certs and cert_reqs is None:
            cert_reqs = 'CERT_REQUIRED'

        # TLS configuration handed to each new connection in _prepare_conn().
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """

        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          ca_cert_dir=self.ca_cert_dir,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Establish tunnel connection early, because otherwise httplib
        would improperly set Host: header to proxy's IP:port.
        """
        # Python 2.7+
        try:
            set_tunnel = conn.set_tunnel
        except AttributeError:  # Platform-specific: Python 2.6
            set_tunnel = conn._set_tunnel

        # Python 2.6.4 and older lack the tunnel-headers parameter.
        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
            set_tunnel(self._proxy_host, self.port)
        else:
            set_tunnel(self._proxy_host, self.port, self.proxy_headers)

        conn.connect()

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug("Starting new HTTPS connection (%d): %s",
                  self.num_connections, self.host)

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")

        # When proxying, the TCP connection targets the proxy; the actual
        # host is reached through the tunnel set up in _prepare_proxy().
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)

        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)

        # Force connect early to allow us to validate the connection.
        if not getattr(conn, 'sock', None):  # AppEngine might not have  `.sock`
            conn.connect()

        if not conn.is_verified:
            warnings.warn((
                'Unverified HTTPS request is being made. '
                'Adding certificate verification is strongly advised. See: '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings'),
                InsecureRequestWarning)
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    port = port or port_by_scheme.get(scheme, 80)

    # Pick the pool class by scheme; anything that isn't https gets HTTP.
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
def _ipv6_host(host):
"""
Process IPv6 address literals
"""
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
#
# Also if an IPv6 address literal has a zone identifier, the
# percent sign might be URIencoded, convert it back into ASCII
if host.startswith('[') and host.endswith(']'):
host = host.replace('%25', '%').strip('[]')
return host
| gpl-3.0 |
lamblin/fuel | setup.py | 2 | 1368 | """Installation script."""
from os import path
from setuptools import find_packages, setup

# Absolute path of the directory containing this setup script.
HERE = path.abspath(path.dirname(__file__))

# Reuse the README as the long description shown on PyPI.
with open(path.join(HERE, 'README.rst')) as f:
    LONG_DESCRIPTION = f.read().strip()

setup(
    name='fuel',
    version='0.0.1',  # PEP 440 compliant
    description='Data pipeline framework for machine learning',
    long_description=LONG_DESCRIPTION,
    url='https://github.com/mila-udem/fuel.git',
    author='Universite de Montreal',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Utilities',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='dataset data iteration pipeline processing',
    packages=find_packages(exclude=['tests']),
    install_requires=['six', 'picklable_itertools', 'pyyaml', 'h5py',
                      'tables', 'progressbar2', 'pyzmq'],
    # Optional extras: `pip install fuel[test]` pulls in the test runners.
    extras_require={
        'test': ['nose', 'nose2', 'mock']
    },
    scripts=['bin/fuel-convert', 'bin/fuel-download', 'bin/fuel-info']
)
| mit |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/smtpd.py | 174 | 18543 | #! /usr/bin/env python
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <[email protected]>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
# Public API of this module.
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]

# Script name and banner, used in the usage text and the SMTP 220 greeting.
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
    """File-like sink that silently discards everything written to it."""

    def write(self, msg):
        pass

    def flush(self):
        pass
# Sink for debug prints; discards output unless debugging (--debug/-d per
# the module docstring) swaps in a real stream.
DEBUGSTREAM = Devnull()

# Frequently used string constants.
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
    # Print the usage text (the module docstring) plus an optional error
    # message to stderr, then exit with the given status code.
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)
class SMTPChannel(asynchat.async_chat):
    """Drive one SMTP client connection: parse commands, collect a message,
    and hand the completed envelope to the owning server's process_message().
    """

    # Parser states: COMMAND reads CRLF-terminated command lines; DATA
    # accumulates the message payload until the <CRLF>.<CRLF> terminator.
    COMMAND = 0
    DATA = 1

    def __init__(self, server, conn, addr):
        asynchat.async_chat.__init__(self, conn)
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []           # buffered chunks of the current line
        self.__state = self.COMMAND
        self.__greeting = 0        # HELO argument once greeted, else falsy
        self.__mailfrom = None     # envelope sender from MAIL FROM
        self.__rcpttos = []        # envelope recipients from RCPT TO
        self.__data = ''           # collected message text after DATA
        self.__fqdn = socket.getfqdn()
        try:
            self.__peer = conn.getpeername()
        except socket.error, err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            if err[0] != errno.ENOTCONN:
                raise
            return
        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
        # Send the RFC 821 "220" greeting as soon as the client connects.
        self.push('220 %s %s' % (self.__fqdn, __version__))
        self.set_terminator('\r\n')

    # Overrides base class for convenience
    def push(self, msg):
        """Queue ``msg`` for sending, appending the CRLF line terminator."""
        asynchat.async_chat.push(self, msg + '\r\n')

    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        """Buffer incoming bytes until the current terminator is seen."""
        self.__line.append(data)

    # Implementation of base class abstract method
    def found_terminator(self):
        """Dispatch a complete command line, or finish a DATA payload."""
        line = EMPTYSTRING.join(self.__line)
        print >> DEBUGSTREAM, 'Data:', repr(line)
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Each SMTP verb maps to a smtp_<VERB> method on this class.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            status = self.__server.process_message(self.__peer,
                                                   self.__mailfrom,
                                                   self.__rcpttos,
                                                   self.__data)
            # Reset the envelope for the next transaction on this channel.
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator('\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)

    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        """HELO <hostname>: greet the client; only one greeting is allowed."""
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)

    def smtp_NOOP(self, arg):
        """NOOP: do nothing; accepts no argument."""
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')

    def smtp_QUIT(self, arg):
        """QUIT: say goodbye and close the channel once output drains."""
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()

    # factored
    def __getaddr(self, keyword, arg):
        """Extract the address following ``keyword`` (e.g. 'FROM:') in ``arg``.

        Returns None when the keyword doesn't match or no address follows.
        """
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <[email protected]> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address

    def smtp_MAIL(self, arg):
        """MAIL FROM:<address>: begin a new mail transaction."""
        print >> DEBUGSTREAM, '===> MAIL', arg
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
        self.push('250 Ok')

    def smtp_RCPT(self, arg):
        """RCPT TO:<address>: add a recipient; requires a prior MAIL."""
        print >> DEBUGSTREAM, '===> RCPT', arg
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
        self.push('250 Ok')

    def smtp_RSET(self, arg):
        """RSET: abort the current transaction; takes no argument."""
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')

    def smtp_DATA(self, arg):
        """DATA: switch to payload collection; requires at least one RCPT."""
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        self.__state = self.DATA
        self.set_terminator('\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
    """Base asyncore SMTP server; subclasses override process_message()."""

    def __init__(self, localaddr, remoteaddr):
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            print >> DEBUGSTREAM, \
                  '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr)

    def handle_accept(self):
        # Accept an incoming client connection and hand it to an
        # SMTPChannel, which drives the SMTP dialogue asynchronously.
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
            channel = SMTPChannel(self, conn, addr)

    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Override this abstract method to handle messages from the client.

        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.

        mailfrom is the raw address the client claims the message is coming
        from.

        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.

        data is a string containing the entire full text of the message,
        headers (if supplied) and all. It has been `de-transparencied'
        according to RFC 821, Section 4.5.2. In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.

        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.
        """
        raise NotImplementedError
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
    """Proxy that relays every message to the real SMTP server unchanged,
    apart from an added X-Peer header recording the client address."""

    def process_message(self, peer, mailfrom, rcpttos, data):
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        # Insert X-Peer just before the blank header/body separator.
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print >> DEBUGSTREAM, 'we got some refusals:', refused

    def _deliver(self, mailfrom, rcpttos, data):
        """Relay via smtplib; return a dict of refused recipients."""
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                s.quit()
        except smtplib.SMTPRecipientsRefused, e:
            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
            refused = e.recipients
        except (socket.error, smtplib.SMTPException), e:
            print >> DEBUGSTREAM, 'got', e.__class__
            # All recipients were refused. If the exception had an associated
            # error code, use it. Otherwise, fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
class MailmanProxy(PureProxy):
    """Proxy that short-circuits mail addressed to local Mailman lists,
    enqueueing it directly with Mailman; anything else is relayed over
    SMTP exactly like PureProxy."""

    def process_message(self, peer, mailfrom, rcpttos, data):
        from cStringIO import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left,
        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print >> DEBUGSTREAM, 'we got refusals:', refused
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.getheader('from'):
            msg['From'] = mailfrom
        if not msg.getheader('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print >> DEBUGSTREAM, 'sending message to', rcpt
            # Cache MailList objects so each list is opened at most once.
            mlist = mlists.get(listname)
            if not mlist:
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!  join/leave are mapped onto the older
                # subscribe/unsubscribe requests via the Subject header.
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
class Options:
    # Command-line option defaults; parseargs() mutates an instance in place.
    setuid = 1               # drop privileges to 'nobody' after startup (-n disables)
    classname = 'PureProxy'  # name of the SMTPServer subclass to instantiate (-c overrides)
def parseargs():
    """Parse sys.argv into an Options instance.

    Recognizes -n/--nosetuid, -V/--version, -h/--help, -c/--class and
    -d/--debug, plus up to two positional host:port specs (local then
    remote).  Calls usage() -- which is expected to exit -- on any error,
    and sets the module-global DEBUGSTREAM when -d is given.
    """
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:d',
            ['class=', 'nosetuid', 'version', 'help', 'debug'])
    except getopt.error, e:
        usage(1, e)
    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print >> sys.stderr, __version__
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = 0
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            DEBUGSTREAM = sys.stderr
    # parse the rest of the arguments; missing specs fall back to defaults
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
    # split into host/port pairs
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
if __name__ == '__main__':
    options = parseargs()
    # Resolve the proxy class: a dotted path imports an external module,
    # otherwise the name is looked up in this module itself.
    classname = options.classname
    if "." in classname:
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport))
    if options.setuid:
        # Become nobody: drop root privileges once the local port is bound.
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    # Serve until interrupted; asyncore dispatches to the proxy instance.
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
| gpl-3.0 |
usakhelo/FreeCAD | src/Mod/Start/StartPage/ArchDesign.py | 32 | 1805 | #***************************************************************************
#* *
#* Copyright (c) 2012 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCADGui

# Start-page action: switch the GUI to the Arch workbench and open a new
# document.  NOTE(review): ``App`` is not imported here -- it is presumably
# injected into the namespace by the FreeCAD runtime; confirm before reuse.
FreeCADGui.activateWorkbench("ArchWorkbench")
App.newDocument()
| lgpl-2.1 |
pxsdirac/tushare | tushare/datayes/master.py | 17 | 4457 | # -*- coding:utf-8 -*-
"""
通联数据
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Master():
    """Wrapper around the DataYes "master" (reference data) web APIs."""

    def __init__(self, client=None):
        # Reuse an authenticated client when provided; otherwise build one
        # from the locally stored token.
        if client is None:
            self.client = Client(up.get_token())
        else:
            self.client = client

    def SecID(self, assetClass='', cnSpell='', partyID='', ticker='', field=''):
        """
        Look up security IDs (the unique identifier of a security in the
        DataYes data model) by institution partyID, pinyin abbreviation
        (cnSpell) or exchange ticker; also returns basic listing details
        such as exchange, listing status, trading currency and ISIN code.
        """
        code, result = self.client.getData(vs.SECID%(assetClass, cnSpell, partyID, ticker, field))
        return _ret_data(code, result)

    def TradeCal(self, exchangeCD='', beginDate='', endDate='', field=''):
        """
        Trading calendar: whether each calendar date is a trading day on
        SSE, SZSE, the China interbank market, DCE, CZCE, SHFE, CFFEX and
        HKEX; for SSE/SZSE the full history since founding is covered.
        Updated the day a holiday schedule is announced.
        """
        code, result = self.client.getData(vs.TRADECAL%(exchangeCD, beginDate, endDate, field))
        return _ret_data(code, result)

    def Industry(self, industryVersion='', industryVersionCD='', industryLevel='', isNew='', field=''):
        """
        Given an industry classification code (e.g. 010303 for the SWS
        2014 classification) or a classification standard name, return
        the industry breakdown under that standard.
        """
        code, result = self.client.getData(vs.INDUSTRY%(industryVersion, industryVersionCD,
                                                        industryLevel, isNew, field))
        return _ret_data(code, result)

    def SecTypeRel(self, secID='', ticker='', typeID='', field=''):
        """
        Constituents of each security classification; the classifications
        themselves are available via SecType (getSecType).
        """
        code, result = self.client.getData(vs.SECTYPEREL%(secID, ticker, typeID, field))
        return _ret_data(code, result)

    def EquInfo(self, ticker='', field=''):
        """
        Match stock codes and names by pinyin or code; covers all listed
        Shanghai/Shenzhen stocks.
        """
        code, result = self.client.getData(vs.EQUINFO%(ticker, field))
        return _ret_data(code, result)

    def SecTypeRegionRel(self, secID='', ticker='', typeID='', field=''):
        """
        Regional classification of Shanghai/Shenzhen stocks, keyed on the
        company's registered administrative region.
        """
        code, result = self.client.getData(vs.SECTYPEREGIONREL%(secID, ticker, typeID, field))
        return _ret_data(code, result)

    def SecType(self, field=''):
        """
        Security classification list: the top level covers A-shares, HK
        stocks, funds, bonds, futures, options, etc., each with finer
        subtypes; the whole tree can be fetched in one call.
        """
        code, result = self.client.getData(vs.SECTYPE%(field))
        return _ret_data(code, result)

    def SecTypeRegion(self, field=''):
        """
        Chinese regional classification, following administrative divisions.
        """
        code, result = self.client.getData(vs.SECTYPEREGION%(field))
        return _ret_data(code, result)

    def SysCode(self, codeTypeID='', valueCD='', field=''):
        """
        Decode the enumerated output columns of the other APIs (e.g. which
        market each getSecID exchangeCD value denotes); all enumeration
        values can be retrieved through this endpoint.
        """
        code, result = self.client.getData(vs.SYSCODE%(codeTypeID, valueCD, field))
        return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
pabloborrego93/edx-platform | lms/djangoapps/courseware/tests/test_masquerade.py | 7 | 18408 | """
Unit tests for masquerade.
"""
import json
import pickle
from mock import patch
from nose.plugins.attrib import attr
from datetime import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from django.utils.timezone import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import (
CourseMasquerade,
MasqueradingKeyValueStore,
handle_ajax,
setup_masquerade,
get_masquerading_group_info
)
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Base class for masquerade tests that sets up a test course and enrolls a user in the course.
    """
    @classmethod
    def setUpClass(cls):
        super(MasqueradeTestCase, cls).setUpClass()
        # Course layout: chapter -> sequential -> vertical -> problem, plus a
        # course_info ("updates") page with recognizable marker text.
        cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC())})
        cls.info_page = ItemFactory.create(
            category="course_info", parent_location=cls.course.location,
            data="OOGIE BLOOGIE", display_name="updates"
        )
        cls.chapter = ItemFactory.create(
            parent_location=cls.course.location,
            category="chapter",
            display_name="Test Section",
        )
        cls.sequential_display_name = "Test Masquerade Subsection"
        cls.sequential = ItemFactory.create(
            parent_location=cls.chapter.location,
            category="sequential",
            display_name=cls.sequential_display_name,
        )
        cls.vertical = ItemFactory.create(
            parent_location=cls.sequential.location,
            category="vertical",
            display_name="Test Unit",
        )
        # A two-input option-response problem, worth 2 points in total.
        problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
        cls.problem_display_name = "TestMasqueradeProblem"
        cls.problem = ItemFactory.create(
            parent_location=cls.vertical.location,
            category='problem',
            data=problem_xml,
            display_name=cls.problem_display_name
        )

    def setUp(self):
        super(MasqueradeTestCase, self).setUp()
        # Subclasses choose the user type (student vs. staff) via create_user().
        self.test_user = self.create_user()
        self.login(self.test_user.email, 'test')
        self.enroll(self.course, True)

    def get_courseware_page(self):
        """
        Returns the server response for the courseware page.
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.chapter.location.name,
                'section': self.sequential.location.name,
            }
        )
        return self.client.get(url)

    def get_course_info_page(self):
        """
        Returns the server response for course info page.
        """
        url = reverse(
            'info',
            kwargs={
                'course_id': unicode(self.course.id),
            }
        )
        return self.client.get(url)

    def _create_mock_json_request(self, user, data, method='POST', session=None):
        """
        Returns a mock JSON request for the specified user
        """
        factory = RequestFactory()
        request = factory.generic(method, '/', content_type='application/json', data=json.dumps(data))
        request.user = user
        request.session = session or {}
        return request

    def verify_staff_debug_present(self, staff_debug_expected):
        """
        Verifies that the staff debug control visibility is as expected (for staff only).
        """
        content = self.get_courseware_page().content
        self.assertIn(self.sequential_display_name, content, "Subsection should be visible")
        self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)

    def get_problem(self):
        """
        Returns the JSON content for the problem in the course.
        """
        problem_url = reverse(
            'xblock_handler',
            kwargs={
                'course_id': unicode(self.course.id),
                'usage_id': unicode(self.problem.location),
                'handler': 'xmodule_handler',
                'suffix': 'problem_get'
            }
        )
        return self.client.get(problem_url)

    def verify_show_answer_present(self, show_answer_expected):
        """
        Verifies that "Show Answer" is only present when expected (for staff only).
        """
        problem_html = json.loads(self.get_problem().content)['html']
        self.assertIn(self.problem_display_name, problem_html)
        self.assertEqual(show_answer_expected, "Show Answer" in problem_html)

    def verify_real_user_profile_link(self):
        """
        Verifies that the 'Profile' link in the navigation dropdown is pointing
        to the real user.
        """
        content = self.get_courseware_page().content
        self.assertIn(
            '<a href="/u/{}" role="menuitem" class="action dropdown-menuitem">Profile</a>'.format(
                self.test_user.username
            ),
            content,
            "Profile link should point to real user",
        )
@attr(shard=1)
class NormalStudentVisibilityTest(MasqueradeTestCase):
    """
    Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
    """
    def create_user(self):
        """
        Creates a normal student user.
        """
        return UserFactory()

    # DISABLE_START_DATES=False enforces course start dates, as in production.
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_not_visible(self):
        """
        Tests that staff debug control is not present for a student.
        """
        self.verify_staff_debug_present(False)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_not_visible(self):
        """
        Tests that "Show Answer" is not visible for a student.
        """
        self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
    """
    Base class for tests of the masquerade behavior for a staff member.
    """
    def create_user(self):
        """
        Creates a staff user.
        """
        return StaffFactory(course_key=self.course.id)

    def update_masquerade(self, role, group_id=None, user_name=None):
        """
        Toggle masquerade state.

        Posts the desired role (and optional group/user) to the masquerade
        endpoint and asserts the request is accepted.
        """
        masquerade_url = reverse(
            'masquerade_update',
            kwargs={
                'course_key_string': unicode(self.course.id),
            }
        )
        response = self.client.post(
            masquerade_url,
            json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
            "application/json"
        )
        self.assertEqual(response.status_code, 200)
        return response
@attr(shard=1)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as student.
    """
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_with_masquerade(self):
        """
        Tests that staff debug control is not visible when masquerading as a student.
        """
        # Verify staff initially can see staff debug
        self.verify_staff_debug_present(True)

        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_staff_debug_present(False)

        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_staff_debug_present(True)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_for_staff(self):
        """
        Tests that "Show Answer" is not visible when masquerading as a student.
        """
        # Verify that staff initially can see "Show Answer".
        self.verify_show_answer_present(True)

        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_show_answer_present(False)

        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_show_answer_present(True)
@attr(shard=1)
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
    """
    Check for staff being able to masquerade as a specific student.
    """
    def setUp(self):
        super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
        # In addition to the staff test_user created by the base class,
        # create and enroll a real student to masquerade as.
        self.student_user = self.create_user()
        self.login_student()
        self.enroll(self.course, True)

    def login_staff(self):
        """ Login as a staff user """
        self.logout()
        self.login(self.test_user.email, 'test')

    def login_student(self):
        """ Login as a student """
        self.logout()
        self.login(self.student_user.email, 'test')

    def submit_answer(self, response1, response2):
        """
        Submit an answer to the single problem in our test course.
        """
        return self.submit_question_answer(
            self.problem_display_name,
            {'2_1': response1, '2_2': response2}
        )

    def get_progress_detail(self):
        """
        Return the reported progress detail for the problem in our test course.

        The return value is a string like u'1/2'.
        """
        json_data = json.loads(self.look_at_question(self.problem_display_name).content)
        progress = '%s/%s' % (str(json_data['current_score']), str(json_data['total_possible']))
        return progress

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_user_on_self_paced(self):
        """
        Test masquerading as a specific user for course info page when self paced configuration
        "enable_course_home_improvements" flag is set

        Login as a staff user and visit course info page.
        set masquerade to view same page as a specific student and revisit the course info page.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        response = self.get_course_info_page()
        self.assertEqual(response.status_code, 200)
        content = response.content
        self.assertIn("OOGIE BLOOGIE", content)

        # Masquerade as the student, enable the self-paced configuration, and check we can see the info page.
        SelfPacedConfiguration(enable_course_home_improvements=True).save()
        self.update_masquerade(role='student', user_name=self.student_user.username)
        response = self.get_course_info_page()
        self.assertEqual(response.status_code, 200)
        content = response.content
        self.assertIn("OOGIE BLOOGIE", content)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student(self):
        """
        Test masquerading as a specific user.

        We answer the problem in our test course as the student and as staff user, and we use the
        progress as a proxy to determine who's state we currently see.
        """
        # Answer correctly as the student, and check progress.
        self.login_student()
        self.submit_answer('Correct', 'Correct')
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Log in as staff, and check the problem is unanswered.
        self.login_staff()
        self.assertEqual(self.get_progress_detail(), u'0/2')

        # Masquerade as the student, and check we can see the student state.
        self.update_masquerade(role='student', user_name=self.student_user.username)
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Verify that the user dropdown links have not changed
        self.verify_real_user_profile_link()

        # Temporarily override the student state.
        self.submit_answer('Correct', 'Incorrect')
        self.assertEqual(self.get_progress_detail(), u'1/2')

        # Reload the page and check we see the student state again.
        self.get_courseware_page()
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Become the staff user again, and check the problem is still unanswered.
        self.update_masquerade(role='staff')
        self.assertEqual(self.get_progress_detail(), u'0/2')

        # Verify the student state did not change.
        self.login_student()
        self.assertEqual(self.get_progress_detail(), u'2/2')

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student_course_info(self):
        """
        Test masquerading as a specific user for course info page.

        We login with login_staff and check course info page content if it's working and then we
        set masquerade to view same page as a specific student and test if it's working or not.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        content = self.get_course_info_page().content
        self.assertIn("OOGIE BLOOGIE", content)

        # Masquerade as the student, and check we can see the info page.
        self.update_masquerade(role='student', user_name=self.student_user.username)
        content = self.get_course_info_page().content
        self.assertIn("OOGIE BLOOGIE", content)
@attr(shard=1)
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as belonging to a group.
    """
    def setUp(self):
        super(TestGetMasqueradingGroupId, self).setUp()
        # Attach a cohort-scheme user partition with two groups to the course.
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        modulestore().update_item(self.course, self.test_user.id)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade(self):
        """
        Tests that a staff member can masquerade as being in a particular group.
        """
        # Verify that there is no masquerading group initially
        group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
        self.assertIsNone(group_id)
        self.assertIsNone(user_partition_id)

        # Install a masquerading group
        request = self._create_mock_json_request(
            self.test_user,
            data={"role": "student", "user_partition_id": 0, "group_id": 1}
        )
        response = handle_ajax(request, unicode(self.course.id))
        self.assertEquals(response.status_code, 200)
        setup_masquerade(request, self.course.id, True)

        # Verify that the masquerading group is returned
        group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
        self.assertEqual(group_id, 1)
        self.assertEqual(user_partition_id, 0)
class ReadOnlyKeyValueStore(DictKeyValueStore):
    """
    A KeyValueStore that raises an exception on attempts to modify it.

    Used to make sure MasqueradingKeyValueStore does not try to modify the
    underlying KeyValueStore.  Raises AssertionError explicitly rather than
    using ``assert False`` so the guard survives ``python -O`` (which strips
    assert statements and would otherwise silently allow writes).
    """
    def set(self, key, value):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def delete(self, key):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def set_many(self, update_dict):  # pylint: disable=unused-argument
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")
class FakeSession(dict):
    """Minimal stand-in for a Django session object.

    Behaves exactly like a dict, but additionally exposes a writable
    ``modified`` attribute (plain dicts cannot grow attributes).
    """
    # Django sets ``session.modified = True`` after writes; default is clean.
    modified = False
class MasqueradingKeyValueStoreTest(TestCase):
    """
    Unit tests for the MasqueradingKeyValueStore class.
    """
    def setUp(self):
        super(MasqueradingKeyValueStoreTest, self).setUp()
        # The wrapped store is read-only: if the masquerading layer ever
        # wrote through to it, the test would fail immediately.
        self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
        self.session = FakeSession()
        self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)

    def test_all(self):
        # Reads fall through to the underlying store.
        self.assertEqual(self.kvs.get('a'), 42)
        self.assertEqual(self.kvs.get('b'), None)
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
        with self.assertRaises(KeyError):
            self.kvs.get('d')
        self.assertTrue(self.kvs.has('a'))
        self.assertTrue(self.kvs.has('b'))
        self.assertTrue(self.kvs.has('c'))
        self.assertFalse(self.kvs.has('d'))
        # Writes are absorbed by the masquerading layer, shadowing the
        # underlying values without touching the read-only store.
        self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
        self.kvs.set('b', 7)
        self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
        self.assertEqual(self.kvs.get('b'), 7)
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
        self.assertEqual(self.kvs.get('d'), 'Giraffe')
        # Deletes shadow the underlying values too: even keys present in
        # the wrapped store raise KeyError after deletion.
        for key in 'abd':
            self.assertTrue(self.kvs.has(key))
            self.kvs.delete(key)
            with self.assertRaises(KeyError):
                self.kvs.get(key)
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
    """
    Unit tests for the CourseMasquerade class.
    """
    def test_unpickling_sets_all_attributes(self):
        """
        Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
        the session.
        """
        cmasq = CourseMasquerade(7)
        # Simulate an instance that was pickled before 'user_name' existed.
        del cmasq.user_name
        pickled_cmasq = pickle.dumps(cmasq)
        unpickled_cmasq = pickle.loads(pickled_cmasq)
        # Unpickling should backfill the attribute with its default.
        self.assertEqual(unpickled_cmasq.user_name, None)
| agpl-3.0 |
renmengye/imageqa-public | src/imageqa_modelavg.py | 1 | 4250 | import sys
import os
import numpy as np
import imageqa_test as it
import nn
def runAvgAll(models, data):
    """Run every model on the test set and return the uniform average of
    their output distributions.

    :param models: list of loaded models accepted by nn.test.
    :param data: dataset dict; data['testData'][0] is the test input.
    :returns: numpy array shaped like the first model's output.
    """
    # Collect each model's raw output on the test inputs.  (The original
    # version referenced the loop variables and accumulator without ever
    # defining them, so it raised NameError when called.)
    modelOutputs = []
    for model in models:
        modelOutputs.append(nn.test(model, data['testData'][0]))
    finalOutput = np.zeros(modelOutputs[0].shape)
    for output in modelOutputs:
        # Outputs may disagree in shape (e.g. different answer vocabularies);
        # average over the overlapping region only, matching testAvgAll().
        shape0 = min(finalOutput.shape[0], output.shape[0])
        shape1 = min(finalOutput.shape[1], output.shape[1])
        finalOutput[:shape0, :shape1] += output[:shape0, :shape1] / float(len(modelOutputs))
    return finalOutput
def testAvgAll(modelOutputs, mixRatio, data, outputFolder):
    """Uniformly average the given model outputs, score them on the test
    set, and write rank/category/WUPS metrics into outputFolder.

    NOTE(review): ``mixRatio`` is currently unused -- the two-model blend
    below was commented out in favor of a uniform average over all models.
    Also relies on the module-level ``resultsFolder`` set in __main__;
    confirm before reusing this function elsewhere.
    """
    # finalOutput = mixRatio * modelOutputs[0] + \
    #             (1 - mixRatio) * modelOutputs[1]
    finalOutput = np.zeros(modelOutputs[0].shape)
    for output in modelOutputs:
        # Outputs may disagree in shape; average over the overlap only.
        shape0 = min(finalOutput.shape[0], output.shape[0])
        shape1 = min(finalOutput.shape[1], output.shape[1])
        finalOutput[:shape0, :shape1] += output[:shape0, :shape1] / float(len(modelOutputs))
    testAnswerFile = it.getAnswerFilename(outputFolder, resultsFolder)
    testTruthFile = it.getTruthFilename(outputFolder, resultsFolder)
    resultsRank, \
    resultsCategory, \
    resultsWups = it.runAllMetrics(
        data['testData'][0],
        finalOutput,
        data['testData'][1],
        data['ansIdict'],
        data['questionTypeArray'],
        testAnswerFile,
        testTruthFile)
    it.writeMetricsToFile(
        outputFolder,
        resultsRank,
        resultsCategory,
        resultsWups,
        resultsFolder)
def testAvg(modelOutputs, mixRatio, target):
    """Precision of a two-model blend at the given mixing ratio."""
    blended = (mixRatio * modelOutputs[0] +
               (1 - mixRatio) * modelOutputs[1])
    rate = it.calcPrecision(blended, target)[0]
    return rate
def validAvg(modelOutputs, mixRatios, target):
    """Grid-search the two-model mixing ratio on validation data.

    Returns the ratio from ``mixRatios`` whose blend scores the highest
    precision under testAvg (ties keep the earliest candidate; if every
    rate is 0 the fallback ratio is 0.0).
    """
    bestRate = 0.0
    bestMixRatio = 0.0
    for mixRatio in mixRatios:
        rate = testAvg(modelOutputs, mixRatio, target)
        print 'Mix ratio %.4f Rate %.4f' % (mixRatio, rate)
        if rate > bestRate:
            bestMixRatio = mixRatio
            bestRate = rate
    return bestMixRatio
if __name__ == '__main__':
    """
    Usage: python imageqa_modelavg.py
            -m[odel] {modelId1}
            -m[odel] {modelId2}
            -vm[odel] {validModelId1}
            -vm[odel] {validModelId2}
            -d[ata] {dataFolder}
            -o[utput] {outputFolder}
            [-r[esults] {resultsFolder}]
    """
    resultsFolder = '../results'
    modelIds = []
    validModelIds = []
    # Hand-rolled flag scan: each flag reads the following argv entry.
    # NOTE(review): -d and -o are effectively required; omitting them
    # leaves dataFolder/outputFolder undefined (NameError below).
    for i, flag in enumerate(sys.argv):
        if flag == '-m' or flag == '-model':
            modelIds.append(sys.argv[i + 1])
        elif flag == '-vm' or flag == '-vmodel':
            validModelIds.append(sys.argv[i + 1])
        elif flag == '-r' or flag == '-results':
            resultsFolder = sys.argv[i + 1]
        elif flag == '-d' or flag == '-data':
            dataFolder = sys.argv[i + 1]
        elif flag == '-o' or flag == '-output':
            outputFolder = sys.argv[i + 1]
    data = it.loadDataset(dataFolder)
    models = []
    validModels = []
    for modelId in modelIds:
        print 'Loading model %s' % modelId
        models.append(it.loadModel(modelId, resultsFolder))
    for modelId in validModelIds:
        print 'Loading model %s' % modelId
        validModels.append(it.loadModel(modelId, resultsFolder))
    modelOutputs = []
    validModelOutputs = []
    # Validation-set search for the best mixing ratio is disabled; the
    # hard-coded 0.5 below is ignored by testAvgAll, which averages all
    # model outputs uniformly.
    # for modelId, model in zip(validModelIds, validModels):
    #     print 'Running model %s' % modelId
    #     modelOutput = nn.test(model, data['validData'][0])
    #     validModelOutputs.append(modelOutput)
    #
    # mixRatios = np.arange(0, 11) * 0.1
    # bestMixRatio = validAvg(validModelOutputs, mixRatios, data['validData'][1])
    # print 'Best ratio found: %.4f' % bestMixRatio
    bestMixRatio = 0.5
    # Run each model on the test set, truncating later outputs to the
    # first model's output shape so they can be averaged.
    shape = None
    for modelId, model in zip(modelIds, models):
        print 'Running model %s' % modelId
        modelOutput = nn.test(model, data['testData'][0])
        if shape is None:
            shape = modelOutput.shape
        else:
            modelOutput = modelOutput[:shape[0],:shape[1]]
        modelOutputs.append(modelOutput)
    testAvgAll(modelOutputs, bestMixRatio, data, outputFolder)
| mit |
nhicher/ansible | lib/ansible/modules/network/nxos/nxos_bgp_neighbor_af.py | 29 | 26338 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: maturity ('preview') and support channel ('network').
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bgp_neighbor_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP address-family's neighbors configuration.
description:
- Manages BGP address-family's neighbors configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the whole BGP address-family's
neighbor configuration.
- Default, when supported, removes properties
- In order to default maximum-prefix configuration, only
C(max_prefix_limit=default) is needed.
options:
asn:
description:
- BGP autonomous system number. Valid values are String,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
default: default
neighbor:
description:
- Neighbor Identifier. Valid values are string. Neighbors may use
IPv4 or IPv6 notation, with or without prefix length.
required: true
afi:
description:
- Address Family Identifier.
required: true
choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']
safi:
description:
- Sub Address Family Identifier.
required: true
choices: ['unicast','multicast', 'evpn']
additional_paths_receive:
description:
- Valid values are enable for basic command enablement; disable
for disabling the command at the neighbor af level
(it adds the disable keyword to the basic command); and inherit
to remove the command at this level (the command value is
inherited from a higher BGP layer).
choices: ['enable','disable', 'inherit']
additional_paths_send:
description:
- Valid values are enable for basic command enablement; disable
for disabling the command at the neighbor af level
(it adds the disable keyword to the basic command); and inherit
to remove the command at this level (the command value is
inherited from a higher BGP layer).
choices: ['enable','disable', 'inherit']
advertise_map_exist:
description:
- Conditional route advertisement. This property requires two
route maps, an advertise-map and an exist-map. Valid values are
an array specifying both the advertise-map name and the exist-map
name, or simply 'default' e.g. ['my_advertise_map',
'my_exist_map']. This command is mutually exclusive with the
advertise_map_non_exist property.
advertise_map_non_exist:
description:
- Conditional route advertisement. This property requires two
route maps, an advertise-map and an exist-map. Valid values are
an array specifying both the advertise-map name and the
non-exist-map name, or simply 'default' e.g.
['my_advertise_map', 'my_non_exist_map']. This command is mutually
exclusive with the advertise_map_exist property.
allowas_in:
description:
- Activate allowas-in property
allowas_in_max:
description:
- Max-occurrences value for allowas_in. Valid values are
an integer value or 'default'. This is mutually exclusive with
allowas_in.
as_override:
description:
- Activate the as-override feature.
type: bool
default_originate:
description:
- Activate the default-originate feature.
type: bool
default_originate_route_map:
description:
- Route-map for the default_originate property.
Valid values are a string defining a route-map name,
or 'default'. This is mutually exclusive with
default_originate.
disable_peer_as_check:
description:
- Disable checking of peer AS-number while advertising
type: bool
version_added: 2.5
filter_list_in:
description:
- Valid values are a string defining a filter-list name,
or 'default'.
filter_list_out:
description:
- Valid values are a string defining a filter-list name,
or 'default'.
max_prefix_limit:
description:
- maximum-prefix limit value. Valid values are an integer value
or 'default'.
max_prefix_interval:
description:
- Optional restart interval. Valid values are an integer.
Requires max_prefix_limit. May not be combined with max_prefix_warning.
max_prefix_threshold:
description:
- Optional threshold percentage at which to generate a warning.
Valid values are an integer value.
Requires max_prefix_limit.
max_prefix_warning:
description:
- Optional warning-only keyword. Requires max_prefix_limit. May not be
combined with max_prefix_interval.
type: bool
next_hop_self:
description:
- Activate the next-hop-self feature.
type: bool
next_hop_third_party:
description:
- Activate the next-hop-third-party feature.
type: bool
prefix_list_in:
description:
- Valid values are a string defining a prefix-list name,
or 'default'.
prefix_list_out:
description:
- Valid values are a string defining a prefix-list name,
or 'default'.
route_map_in:
description:
- Valid values are a string defining a route-map name,
or 'default'.
route_map_out:
description:
- Valid values are a string defining a route-map name,
or 'default'.
route_reflector_client:
description:
- Router reflector client.
type: bool
send_community:
description:
- send-community attribute.
choices: ['none', 'both', 'extended', 'standard', 'default']
soft_reconfiguration_in:
description:
- Valid values are 'enable' for basic command enablement; 'always'
to add the always keyword to the basic command; and 'inherit' to
remove the command at this level (the command value is inherited
from a higher BGP layer).
choices: ['enable','always','inherit']
soo:
description:
- Site-of-origin. Valid values are a string defining a VPN
extcommunity or 'default'.
suppress_inactive:
description:
- suppress-inactive feature.
type: bool
unsuppress_map:
description:
- unsuppress-map. Valid values are a string defining a route-map
name or 'default'.
weight:
description:
- Weight value. Valid values are an integer value or 'default'.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: configure RR client
nxos_bgp_neighbor_af:
asn: 65535
neighbor: '192.0.2.3'
afi: ipv4
safi: unicast
route_reflector_client: true
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "neighbor 192.0.2.3",
"address-family ipv4 unicast", "route-reflector-client"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Module parameters that are plain on/off toggles. get_value() maps the
# presence/absence of their CLI line to True/False, and 'default' input is
# normalized to False for these keys in main().
BOOL_PARAMS = [
    'allowas_in',
    'as_override',
    'default_originate',
    'disable_peer_as_check',
    'next_hop_self',
    'next_hop_third_party',
    'route_reflector_client',
    'suppress_inactive'
]
# Maps each module parameter to the NX-OS CLI keyword(s) that configure it
# inside the neighbor address-family section. Multi-word values encode
# '<command> <qualifier>' pairs (e.g. 'filter-list in').
PARAM_TO_COMMAND_KEYMAP = {
    'afi': 'address-family',
    'asn': 'router bgp',
    'neighbor': 'neighbor',
    'additional_paths_receive': 'capability additional-paths receive',
    'additional_paths_send': 'capability additional-paths send',
    'advertise_map_exist': 'advertise-map exist-map',
    'advertise_map_non_exist': 'advertise-map non-exist-map',
    'allowas_in': 'allowas-in',
    'allowas_in_max': 'allowas-in',
    'as_override': 'as-override',
    'default_originate': 'default-originate',
    'default_originate_route_map': 'default-originate route-map',
    'disable_peer_as_check': 'disable-peer-as-check',
    'filter_list_in': 'filter-list in',
    'filter_list_out': 'filter-list out',
    'max_prefix_limit': 'maximum-prefix',
    'max_prefix_interval': 'maximum-prefix interval',
    'max_prefix_threshold': 'maximum-prefix threshold',
    'max_prefix_warning': 'maximum-prefix warning',
    'next_hop_self': 'next-hop-self',
    'next_hop_third_party': 'next-hop-third-party',
    'prefix_list_in': 'prefix-list in',
    'prefix_list_out': 'prefix-list out',
    'route_map_in': 'route-map in',
    'route_map_out': 'route-map out',
    'route_reflector_client': 'route-reflector-client',
    'safi': 'address-family',
    'send_community': 'send-community',
    'soft_reconfiguration_in': 'soft-reconfiguration inbound',
    'soo': 'soo',
    'suppress_inactive': 'suppress-inactive',
    'unsuppress_map': 'unsuppress-map',
    'weight': 'weight',
    'vrf': 'vrf'
}
def get_value(arg, config, module):
    """Parse the current value of module parameter *arg* out of *config*
    (the text of the neighbor address-family section of the running config).

    Returns '' when the command is absent, True/False for boolean knobs, a
    [map, exist_map] list for advertise-map, otherwise the value string.
    """
    # Parameters whose CLI output needs bespoke parsing (see get_custom_value).
    custom = [
        'additional_paths_send',
        'additional_paths_receive',
        'max_prefix_limit',
        'max_prefix_interval',
        'max_prefix_threshold',
        'max_prefix_warning',
        'send_community',
        'soft_reconfiguration_in'
    ]
    command = PARAM_TO_COMMAND_KEYMAP[arg]
    has_command = re.search(r'^\s+{0}\s*'.format(command), config, re.M)
    has_command_val = re.search(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
    value = ''

    if arg in custom:
        value = get_custom_value(arg, config, module)

    elif arg == 'next_hop_third_party':
        # Enabled by default on the device: only an explicit 'no ...' line
        # means it is off.
        has_no_command = re.search(r'^\s+no\s+{0}\s*$'.format(command), config, re.M)
        value = False
        if not has_no_command:
            value = True

    elif arg in BOOL_PARAMS:
        # Boolean knob: presence of the command line means enabled.
        value = False
        if has_command:
            value = True

    elif command.startswith('advertise-map'):
        # NOTE: '{0}'/'{1}' below are str.format placeholders filled with the
        # two words of the CLI command, not regex repetition counts.
        value = []
        has_adv_map = re.search(r'{0}\s(?P<value1>.*)\s{1}\s(?P<value2>.*)$'.format(*command.split()), config, re.M)
        if has_adv_map:
            value = list(has_adv_map.groups())

    elif command.split()[0] in ['filter-list', 'prefix-list', 'route-map']:
        # CLI line order is '<kind> <name> <direction>'; capture the name.
        has_cmd_direction_val = re.search(r'{0}\s(?P<value>.*)\s{1}$'.format(*command.split()), config, re.M)
        if has_cmd_direction_val:
            value = has_cmd_direction_val.group('value')

    elif has_command_val:
        value = has_command_val.group('value')

    return value
def get_custom_value(arg, config, module):
    """Parse parameters whose CLI lines encode more than one simple value."""
    command = PARAM_TO_COMMAND_KEYMAP.get(arg)
    splitted_config = config.splitlines()
    value = ''

    # NOTE(review): has_command / has_command_val are computed but never used
    # in this function.
    command_re = re.compile(r'\s+{0}\s*'.format(command), re.M)
    has_command = command_re.search(config)
    command_val_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
    has_command_val = command_val_re.search(config)

    if arg.startswith('additional_paths'):
        # Tri-state: absent -> 'inherit', otherwise 'disable' or 'enable'.
        value = 'inherit'
        for line in splitted_config:
            if command in line:
                if 'disable' in line:
                    value = 'disable'
                else:
                    value = 'enable'
    elif arg.startswith('max_prefix'):
        # A single 'maximum-prefix <limit> [<threshold>] [restart <interval> |
        # warning-only]' line carries all four max_prefix_* values.
        for line in splitted_config:
            if 'maximum-prefix' in line:
                splitted_line = line.split()
                if arg == 'max_prefix_limit':
                    value = splitted_line[1]
                elif arg == 'max_prefix_interval' and 'restart' in line:
                    value = splitted_line[-1]
                elif arg == 'max_prefix_threshold' and len(splitted_line) > 2:
                    # Third token is the threshold only when it is numeric.
                    try:
                        int(splitted_line[2])
                        value = splitted_line[2]
                    except ValueError:
                        value = ''
                elif arg == 'max_prefix_warning':
                    value = 'warning-only' in line
    elif arg == 'soft_reconfiguration_in':
        # Tri-state: absent -> 'inherit', otherwise 'always' or 'enable'.
        value = 'inherit'
        for line in splitted_config:
            if command in line:
                if 'always' in line:
                    value = 'always'
                else:
                    value = 'enable'
    elif arg == 'send_community':
        value = 'none'
        for line in splitted_config:
            if command in line:
                if 'extended' in line:
                    # A second matching line after 'standard' means both are set.
                    if value == 'standard':
                        value = 'both'
                    else:
                        value = 'extended'
                elif 'both' in line:
                    value = 'both'
                else:
                    value = 'standard'
    return value
def get_existing(module, args, warnings):
    """Read the device config and return the current value of each *args*
    parameter for the requested neighbor/address-family, keyed by name.

    Returns an empty dict (and appends a warning) when no BGP process exists.
    """
    existing = {}
    netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))

    # ASN may be plain or dotted notation (e.g. 65535 or 1.10).
    asn_regex = re.compile(r'.*router\sbgp\s(?P<existing_asn>\d+(\.\d+)?).*', re.S)
    match_asn = asn_regex.match(str(netcfg))

    if match_asn:
        existing_asn = match_asn.group('existing_asn')
        # Walk down the section hierarchy to the neighbor's address-family.
        parents = ["router bgp {0}".format(existing_asn)]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))
        parents.append('neighbor {0}'.format(module.params['neighbor']))
        parents.append('address-family {0} {1}'.format(module.params['afi'], module.params['safi']))
        config = netcfg.get_section(parents)

        if config:
            for arg in args:
                # Identity keys are taken from the task, not parsed from config.
                if arg not in ['asn', 'vrf', 'neighbor', 'afi', 'safi']:
                    existing[arg] = get_value(arg, config, module)

            existing['asn'] = existing_asn
            existing['neighbor'] = module.params['neighbor']
            existing['vrf'] = module.params['vrf']
            existing['afi'] = module.params['afi']
            existing['safi'] = module.params['safi']
    else:
        warnings.append("The BGP process didn't exist but the task just created it.")

    return existing
def apply_key_map(key_map, table):
    """Return *table* re-keyed through *key_map*.

    Entries whose key maps to a missing/falsy CLI name are dropped.
    """
    return dict(
        (key_map[param], param_value)
        for param, param_value in table.items()
        if key_map.get(param)
    )
def get_default_command(key, value, existing_commands):
    """Build the CLI needed to put *key* back to its default/inherited state.

    Returns a single command string, a list of commands (for 'allowas-in
    max'), or '' when nothing needs to be sent.
    """
    existing_value = existing_commands.get(key)

    if not existing_value:
        # Nothing configured on the device; boolean knobs are still negated
        # explicitly so a previously-set flag cannot linger.
        if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
            return 'no {0}'.format(key)
        return ''

    if value == 'inherit':
        # Only send 'no <cmd>' when the device is not already inheriting.
        return '' if existing_value == 'inherit' else 'no {0}'.format(key)

    words = key.split()
    if key.startswith('advertise-map'):
        # existing_value is [map_name, (non-)exist_map_name]; words[1] is the
        # 'exist-map' / 'non-exist-map' keyword.
        return 'no advertise-map {0} {1} {2}'.format(
            existing_value[0], words[1], existing_value[1])
    if words[0] in ('filter-list', 'prefix-list', 'route-map'):
        # Key is '<kind> <direction>'; the configured name goes in the middle.
        return 'no {0} {1} {2}'.format(words[0], existing_value, words[1])
    if key.startswith('maximum-prefix'):
        # One negation clears the limit and all of its options.
        return 'no maximum-prefix'
    if key == 'allowas-in max':
        # Drop the max form but keep plain allowas-in enabled.
        return ['no allowas-in {0}'.format(existing_value), 'allowas-in']
    return 'no {0} {1}'.format(key, existing_value)
def fix_proposed(module, existing, proposed):
    """Reconcile mutually-dependent proposed values in place and return them."""
    wants_flag = proposed.get('allowas_in')
    wants_max = proposed.get('allowas_in_max')
    # allowas-in and allowas-in-max drive the same CLI knob: when both are
    # given the max form wins; a max without the flag is dropped.
    if wants_max and wants_flag:
        del proposed['allowas_in']
    elif wants_max:
        del proposed['allowas_in_max']

    # 'default' and 'none' are the same device state for send-community, so
    # there is nothing to change.
    if (existing.get('send_community') == 'none'
            and proposed.get('send_community') == 'default'):
        del proposed['send_community']
    return proposed
def state_present(module, existing, proposed, candidate):
    """Translate *proposed* (already reduced to values that differ from
    *existing*) into CLI commands and add them to *candidate* under the
    router/vrf/neighbor/address-family section parents."""
    commands = list()
    proposed = fix_proposed(module, existing, proposed)
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for key, value in proposed_commands.items():
        if value in ['inherit', 'default']:
            # Reset the knob back to its device default / inherited value.
            command = get_default_command(key, value, existing_commands)

            if isinstance(command, str):  # single command
                if command and command not in commands:
                    commands.append(command)
            elif isinstance(command, list):  # command set
                for cmd in command:
                    if cmd not in commands:
                        commands.append(cmd)

        elif key.startswith('maximum-prefix'):
            # maximum-prefix and all of its options go out as one CLI line.
            if module.params['max_prefix_limit'] != 'default':
                command = 'maximum-prefix {0}'.format(module.params['max_prefix_limit'])
                if module.params['max_prefix_threshold']:
                    command += ' {0}'.format(module.params['max_prefix_threshold'])
                if module.params['max_prefix_interval']:
                    command += ' restart {0}'.format(module.params['max_prefix_interval'])
                elif module.params['max_prefix_warning']:
                    command += ' warning-only'
                commands.append(command)
        elif value is True:
            commands.append(key)
        elif value is False:
            commands.append('no {0}'.format(key))
        elif key == 'address-family':
            commands.append("address-family {0} {1}".format(module.params['afi'], module.params['safi']))
        elif key.startswith('capability additional-paths'):
            command = key
            if value == 'disable':
                command += ' disable'
            commands.append(command)
        elif key.startswith('advertise-map'):
            # key is 'advertise-map <exist-map|non-exist-map>'; value is
            # [map_name, (non-)exist_map_name].
            direction = key.split()[1]
            commands.append('advertise-map {1} {0} {2}'.format(direction, *value))
        elif key.split()[0] in ['filter-list', 'prefix-list', 'route-map']:
            # CLI order is '<kind> <name> <direction>'.
            commands.append('{1} {0} {2}'.format(value, *key.split()))

        elif key == 'soft-reconfiguration inbound':
            command = ''
            if value == 'enable':
                command = key
            elif value == 'always':
                command = '{0} {1}'.format(key, value)
            commands.append(command)
        elif key == 'send-community':
            command = key
            if value in ['standard', 'extended']:
                # Clear any configured 'both' first so the narrower variant
                # actually takes effect.
                commands.append('no ' + key + ' both')
                command += ' {0}'.format(value)
            commands.append(command)
        else:
            command = '{0} {1}'.format(key, value)
            commands.append(command)

    if commands:
        parents = ['router bgp {0}'.format(module.params['asn'])]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('neighbor {0}'.format(module.params['neighbor']))

        # The address-family line is a section parent, not a command; remove
        # it from the command list if it slipped in above.
        af_command = 'address-family {0} {1}'.format(
            module.params['afi'], module.params['safi'])
        parents.append(af_command)
        if af_command in commands:
            commands.remove(af_command)
        candidate.add(commands, parents=parents)
def state_absent(module, existing, candidate):
    """Queue removal of the whole neighbor address-family section."""
    params = module.params

    # Parent sections leading down to the neighbor.
    parents = ['router bgp {0}'.format(params['asn'])]
    if params['vrf'] != 'default':
        parents.append('vrf {0}'.format(params['vrf']))
    parents.append('neighbor {0}'.format(params['neighbor']))

    removal = 'no address-family {0} {1}'.format(params['afi'], params['safi'])
    candidate.add([removal], parents=parents)
def main():
    """Module entry point: diff desired vs. running config and apply it."""
    argument_spec = dict(
        asn=dict(required=True, type='str'),
        vrf=dict(required=False, type='str', default='default'),
        neighbor=dict(required=True, type='str'),
        afi=dict(required=True, type='str'),
        safi=dict(required=True, type='str'),
        additional_paths_receive=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
        additional_paths_send=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
        advertise_map_exist=dict(required=False, type='list'),
        advertise_map_non_exist=dict(required=False, type='list'),
        allowas_in=dict(required=False, type='bool'),
        allowas_in_max=dict(required=False, type='str'),
        as_override=dict(required=False, type='bool'),
        default_originate=dict(required=False, type='bool'),
        default_originate_route_map=dict(required=False, type='str'),
        disable_peer_as_check=dict(required=False, type='bool'),
        filter_list_in=dict(required=False, type='str'),
        filter_list_out=dict(required=False, type='str'),
        max_prefix_limit=dict(required=False, type='str'),
        max_prefix_interval=dict(required=False, type='str'),
        max_prefix_threshold=dict(required=False, type='str'),
        max_prefix_warning=dict(required=False, type='bool'),
        next_hop_self=dict(required=False, type='bool'),
        next_hop_third_party=dict(required=False, type='bool'),
        prefix_list_in=dict(required=False, type='str'),
        prefix_list_out=dict(required=False, type='str'),
        route_map_in=dict(required=False, type='str'),
        route_map_out=dict(required=False, type='str'),
        route_reflector_client=dict(required=False, type='bool'),
        send_community=dict(required=False, choices=['none', 'both', 'extended', 'standard', 'default']),
        soft_reconfiguration_in=dict(required=False, type='str', choices=['enable', 'always', 'inherit']),
        soo=dict(required=False, type='str'),
        suppress_inactive=dict(required=False, type='bool'),
        unsuppress_map=dict(required=False, type='str'),
        weight=dict(required=False, type='str'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['advertise_map_exist', 'advertise_map_non_exist'],
                            ['max_prefix_interval', 'max_prefix_warning'],
                            ['default_originate', 'default_originate_route_map'],
                            ['allowas_in', 'allowas_in_max']],
        supports_check_mode=True,
    )

    warnings = list()
    check_args(module, warnings)
    result = dict(changed=False, warnings=warnings)

    state = module.params['state']

    # Every maximum-prefix option depends on the base limit being present.
    for key in ['max_prefix_interval', 'max_prefix_warning', 'max_prefix_threshold']:
        if module.params[key] and not module.params['max_prefix_limit']:
            module.fail_json(
                msg='max_prefix_limit is required when using %s' % key
            )
    if module.params['vrf'] == 'default' and module.params['soo']:
        module.fail_json(msg='SOO is only allowed in non-default VRF')

    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args, warnings)

    # Refuse to manage a device that already runs a different BGP ASN.
    if existing.get('asn') and state == 'present':
        if existing.get('asn') != module.params['asn']:
            module.fail_json(msg='Another BGP ASN already exists.',
                             proposed_asn=module.params['asn'],
                             existing_asn=existing.get('asn'))

    # advertise-map values arrive as lists; a bare ['default'] means 'default'.
    for param in ['advertise_map_exist', 'advertise_map_non_exist']:
        if module.params[param] == ['default']:
            module.params[param] = 'default'

    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    # Normalize string booleans / 'default' markers and keep only the values
    # that actually differ from what the device reports.
    proposed = {}
    for key, value in proposed_args.items():
        if key not in ['asn', 'vrf', 'neighbor']:
            if not isinstance(value, list):
                if str(value).lower() == 'true':
                    value = True
                elif str(value).lower() == 'false':
                    value = False
                elif str(value).lower() == 'default':
                    if key in BOOL_PARAMS:
                        value = False
                    else:
                        value = 'default'
                elif key == 'send_community' and str(value).lower() == 'none':
                    value = 'default'
            if existing.get(key) != value:
                proposed[key] = value

    candidate = CustomNetworkConfig(indent=3)
    if state == 'present':
        state_present(module, existing, proposed, candidate)
    elif state == 'absent' and existing:
        state_absent(module, existing, candidate)

    if candidate:
        candidate = candidate.items_text()
        load_config(module, candidate)
        result['changed'] = True
        result['commands'] = candidate
    else:
        result['commands'] = []

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
siimeon/Kipa | web/robot/kipa_sivuosoitteet_ja_otsikot.py | 1 | 1858 | '''Kisan alasivujen URLit ja sivujen otsikot'''
# -*- coding: utf-8 -*-
# Name of the test competition, default value: testikisa
TESTIKISA = u'testikisa'
# Kipa main page
KIPA_OTSIKKO = u'Kipa - kaikki kisat'
KIPA_URL = u'http://127.0.0.1:8000/kipa'
# Result entry
TULOSTEN_SYOTTO_OTSIKKO = u'Kipa - Syötä tuloksia'
TULOSTEN_SYOTTO_URL = u'syota'
TULOSTEN_SYOTTO_TARKISTUS_OTSIKKO = u'Kipa - Syötä\
tuloksia - tarkistussyötteet'
TULOSTEN_SYOTTO_TARKISTUS_URL = u'syota/tarkistus'
# Results
TULOSTEN_TARKISTUS_OTSIKKO = u'Kipa - Tulokset sarjoittain'
TULOSTEN_TARKISTUS_URL = u'tulosta/normaali'
TUOMARINEUVOSTO_OTSIKKO = u'Kipa - Tuomarineuvoston antamien\
tulosten määritys'
TUOMARINEUVOSTO_URL = u'maarita/tuomarineuvos'
LASKENNAN_TILANNE_OTSIKKO = u'Kipa - Tulokset sarjoittain'
LASKENNAN_TILANNE_URL = u'tulosta/tilanne'
# Competition definitions
KISAN_MAARITYS_OTSIKKO = u'Kipa - Määritä kisa'
KISAN_MAARITYS_URL = u'maarita'
VARTIOIDEN_MAARITYS_OTSIKKO = u'Kipa - Määritä vartiot'
VARTIOIDEN_MAARITYS_URL = u'maarita/vartiot'
TEHTAVAN_MAARITYS_OTSIKKO = u'Kipa - Muokkaa tehtävää'
TEHTAVAN_MAARITYS_URL = u'maarita/tehtava'
TESTITULOKSIEN_MAARITYS_OTSIKKO = u'Kipa - Testituloksien määritys'
TESTITULOKSIEN_MAARITYS_URL = u'maarita/testitulos'
# Administration
# NB: the "list all competitions" link goes to the main page
KAIKKI_KISAT_OTSIKKO = u'Kipa - kaikki kisat'
# NOTE(review): this assigns the *title* constant (KIPA_OTSIKKO) rather than
# KIPA_URL — looks like a copy/paste slip; confirm against the robot tests
# before changing.
KAIKKI_KISAT_URL = KIPA_OTSIKKO
# Save competition: no support for receiving the file yet, TBD
#"http://127.0.0.1:8000/kipa/testi_kisa/tallenna/
# Importing a competition from a file; probably easy using a fixture. TBD
KISAN_TUONTI_URL = u'korvaa'
KISAN_TUONTI_OTSIKKO = u'Kipa - Korvaa kisa tiedostosta'
# Delete competition
KISAN_POISTO_OTSIKKO = u'Kipa - Poista kisa'
KISAN_POISTO_URL = u'poista'
# Authentication
admin_tunnus = u'admin'
admin_salasana = u'admin'
| gpl-3.0 |
tlakshman26/cinder-https-changes | cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py | 20 | 1691 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
def upgrade(migrate_engine):
    """Create the volume_admin_metadata table."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect 'volumes' into the metadata so the ForeignKey below can resolve
    # volumes.id.
    Table('volumes', meta, autoload=True)

    # New table
    volume_admin_metadata = Table(
        'volume_admin_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
               nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    volume_admin_metadata.create()
def downgrade(migrate_engine):
    """Drop the volume_admin_metadata table added by upgrade()."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Reflect the existing table definition before dropping it.
    volume_admin_metadata = Table('volume_admin_metadata',
                                  meta,
                                  autoload=True)
    volume_admin_metadata.drop()
| apache-2.0 |
Marquand/cutorrent | simplejson/scanner.py | 4 | 1975 | """
Iterator based sre token scanner
"""
import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
import re
__all__ = ['Scanner', 'pattern']
FLAGS = (re.VERBOSE | re.MULTILINE | re.DOTALL)
class Scanner(object):
    """Iterator-based token scanner built on the internal sre machinery.

    All lexicon patterns are combined into one alternation so a single pass
    of the compiled matcher dispatches to the matching token's action.
    """
    def __init__(self, lexicon, flags=FLAGS):
        # actions[0] is a placeholder: match.lastindex is 1-based because
        # each phrase is wrapped in group number idx + 1 below.
        self.actions = [None]
        # combine phrases into a compound pattern
        s = sre_parse.Pattern()
        s.flags = flags
        p = []
        for idx, token in enumerate(lexicon):
            phrase = token.pattern
            try:
                # Wrap each phrase in its own numbered group so lastindex
                # identifies which alternative matched.
                subpattern = sre_parse.SubPattern(s,
                    [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
            except sre_constants.error:
                raise
            p.append(subpattern)
            self.actions.append(token)

        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)

    def iterscan(self, string, idx=0, context=None):
        """
        Yield match, end_idx for each match
        """
        match = self.scanner.scanner(string, idx).match
        actions = self.actions
        lastend = idx
        end = len(string)
        while True:
            m = match()
            if m is None:
                break
            matchbegin, matchend = m.span()
            if lastend == matchend:
                # Zero-width progress: stop to avoid looping forever.
                break
            action = actions[m.lastindex]
            if action is not None:
                rval, next_pos = action(m, context)
                if next_pos is not None and next_pos != matchend:
                    # "fast forward" the scanner
                    matchend = next_pos
                    match = self.scanner.scanner(string, matchend).match
            # NOTE(review): if action is None, rval is unbound here and the
            # yield raises NameError — latent bug inherited from upstream.
            yield rval, matchend
            lastend = matchend
def pattern(pattern, flags=FLAGS):
    """Decorator attaching a regex to a token-handler function.

    The decorated function gains two attributes used by Scanner:
    ``pattern`` (the raw pattern string) and ``regex`` (the pattern
    compiled with *flags*).
    """
    def decorator(fn):
        fn.pattern = pattern
        fn.regex = re.compile(pattern, flags)
        return fn
    # Fixed: the original line read 'return decorator | apache-2.0 |' — stray
    # license metadata fused into the source, which is a syntax error.
    return decorator
ClearCorp-dev/odoo | addons/l10n_multilang/__openerp__.py | 339 | 1670 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi Language Chart of Accounts',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
* Multi language support for Chart of Accounts, Taxes, Tax Codes, Journals,
Accounting Templates, Analytic Chart of Accounts and Analytic Journals.
* Setup wizard changes
- Copy translations for COA, Tax, Tax Code and Fiscal Position from
templates to target objects.
""",
'website': 'http://www.openerp.com',
'depends' : ['account'],
'data': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fujunwei/chromium-crosswalk | tools/perf/benchmarks/page_cycler.py | 3 | 6823 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import page_cycler
import page_sets
class _PageCycler(benchmark.Benchmark):
  """Shared base for all page_cycler benchmarks.

  Subclasses supply a page set; this base wires up repeat counts, the
  cold/warm load split and the optional Speed Index metric.
  """
  options = {'pageset_repeat': 6}
  cold_load_percent = 50  # % of page visits for which a cold load is forced

  @classmethod
  def Name(cls):
    return 'page_cycler'

  @classmethod
  def AddBenchmarkCommandLineArgs(cls, parser):
    parser.add_option('--report-speed-index',
                      action='store_true',
                      help='Enable the speed index metric.')

  @classmethod
  def ValueCanBeAddedPredicate(cls, _, is_first_result):
    # The first result is only kept when cold loads are forced; otherwise it
    # is a warm-up run and is discarded.
    return cls.cold_load_percent > 0 or not is_first_result

  def CreatePageTest(self, options):
    return page_cycler.PageCycler(
        page_repeat=options.page_repeat,
        pageset_repeat=options.pageset_repeat,
        cold_load_percent=self.cold_load_percent,
        report_speed_index=options.report_speed_index)
# This is an old page set, we intend to remove it after more modern benchmarks
# work on CrOS.
@benchmark.Enabled('chromeos')
class PageCyclerDhtml(_PageCycler):
  """Benchmarks for various DHTML operations like simple animations."""
  page_set = page_sets.DhtmlPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.dhtml'
class PageCyclerIntlArFaHe(_PageCycler):
  """Page load time for a variety of pages in Arabic, Farsi and Hebrew.

  Runs against pages recorded in April, 2013.
  """
  # Exercises right-to-left text layout.
  page_set = page_sets.IntlArFaHePageSet

  @classmethod
  def Name(cls):
    return 'page_cycler.intl_ar_fa_he'
@benchmark.Disabled('win')  # crbug.com/366715
class PageCyclerIntlEsFrPtBr(_PageCycler):
  """Page load time for a pages in Spanish, French and Brazilian Portuguese.

  Runs against pages recorded in April, 2013.
  """
  page_set = page_sets.IntlEsFrPtBrPageSet

  @classmethod
  def Name(cls):
    return 'page_cycler.intl_es_fr_pt-BR'
class PageCyclerIntlHiRu(_PageCycler):
  """Page load time benchmark for a variety of pages in Hindi and Russian.

  Runs against pages recorded in April, 2013.
  """
  page_set = page_sets.IntlHiRuPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.intl_hi_ru'
@benchmark.Disabled('android', 'win')  # crbug.com/379564, crbug.com/434366
class PageCyclerIntlJaZh(_PageCycler):
  """Page load time benchmark for a variety of pages in Japanese and Chinese.

  Runs against pages recorded in April, 2013.
  """
  # Exercises CJK text rendering and fonts.
  page_set = page_sets.IntlJaZhPageSet

  @classmethod
  def Name(cls):
    return 'page_cycler.intl_ja_zh'
@benchmark.Disabled('xp')  # crbug.com/434366
class PageCyclerIntlKoThVi(_PageCycler):
  """Page load time for a variety of pages in Korean, Thai and Vietnamese.

  Runs against pages recorded in April, 2013.
  """
  page_set = page_sets.IntlKoThViPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.intl_ko_th_vi'
class PageCyclerMorejs(_PageCycler):
  """Page load for a variety of pages that were JavaScript heavy in 2009."""
  page_set = page_sets.MorejsPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.morejs'
# This is an old page set, we intend to remove it after more modern benchmarks
# work on CrOS.
@benchmark.Enabled('chromeos')
class PageCyclerMoz(_PageCycler):
  """Page load for mozilla's original page set. Recorded in December 2000."""
  page_set = page_sets.MozPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.moz'
# Win, mac, linux: crbug.com/353260
# Android: crbug.com/473161
@benchmark.Disabled('linux', 'win', 'mac', 'android')
class PageCyclerNetsimTop10(_PageCycler):
  """Measures load time of the top 10 sites under simulated cable network.

  Recorded in June, 2013.  Pages are loaded under the simplisticly simulated
  bandwidth and RTT constraints of a cable modem (5Mbit/s down, 1Mbit/s up,
  28ms RTT). Contention is realistically simulated, but slow start is not.
  DNS lookups are 'free'.
  """
  tag = 'netsim'
  page_set = page_sets.Top10PageSet
  options = {
      'extra_wpr_args_as_string': '--shaping_type=proxy --net=cable',
      'pageset_repeat': 6,
  }
  # Every visit is a cold load so network shaping dominates the measurement.
  cold_load_percent = 100

  @classmethod
  def Name(cls):
    return 'page_cycler.netsim.top_10'

  def CreatePageTest(self, options):
    return page_cycler.PageCycler(
        page_repeat=options.page_repeat,
        pageset_repeat=options.pageset_repeat,
        cold_load_percent=self.cold_load_percent,
        report_speed_index=options.report_speed_index,
        clear_cache_before_each_run=True)
@benchmark.Enabled('android')
class PageCyclerTop10Mobile(_PageCycler):
  """Page load time benchmark for the top 10 mobile web pages.

  Runs against pages recorded in November, 2013.
  """

  @classmethod
  def Name(cls):
    return 'page_cycler.top_10_mobile'

  def CreatePageSet(self, options):
    # No interactions: measure page load only.
    return page_sets.Top10MobilePageSet(run_no_page_interactions=True)
@benchmark.Disabled
class PageCyclerKeyMobileSites(_PageCycler):
  """Page load time benchmark for key mobile sites."""
  page_set = page_sets.KeyMobileSitesPageSet

  @classmethod
  def Name(cls):
    # Telemetry identifier for this benchmark.
    return 'page_cycler.key_mobile_sites_smooth'
@benchmark.Disabled('android')  # crbug.com/357326
class PageCyclerToughLayoutCases(_PageCycler):
  """Page loading for the slowest layouts observed in the Alexa top 1 million.

  Recorded in July 2013.
  """
  page_set = page_sets.ToughLayoutCasesPageSet

  @classmethod
  def Name(cls):
    return 'page_cycler.tough_layout_cases'
# crbug.com/273986: This test is flakey on Windows.
@benchmark.Disabled('win')
class PageCyclerTypical25(_PageCycler):
  """Page load time benchmark for a 25 typical web pages.

  Designed to represent typical, not highly optimized or highly popular web
  sites. Runs against pages recorded in June, 2014.
  """

  @classmethod
  def Name(cls):
    return 'page_cycler.typical_25'

  def CreatePageSet(self, options):
    # No interactions: measure page load only.
    return page_sets.Typical25PageSet(run_no_page_interactions=True)
# crbug.com/273986: This test is flakey on Windows.
@benchmark.Disabled  # crbug.com/463346: Test is crashing Chrome.
class PageCyclerOopifTypical25(_PageCycler):
  """ A varation of the benchmark above, but running in --site-per-process
  to allow measuring performance of out-of-process iframes.
  """

  @classmethod
  def Name(cls):
    return 'page_cycler_oopif.typical_25'

  def CustomizeBrowserOptions(self, options):
    # Force every site into its own renderer process (OOPIF mode).
    options.AppendExtraBrowserArgs(['--site-per-process'])

  def CreatePageSet(self, options):
    return page_sets.Typical25PageSet(run_no_page_interactions=True)
@benchmark.Disabled  # crbug.com/443730
class PageCyclerBigJs(_PageCycler):
  """Page load time benchmark for pages with large JavaScript payloads."""
  page_set = page_sets.BigJsPageSet

  @classmethod
  def Name(cls):
    return 'page_cycler.big_js'
| bsd-3-clause |
infected-lp/android_kernel_sony_msm | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None  # autocommit: each execute() commits immediately
def trace_begin():
    """perf hook, called once before any sample: create the result tables."""
    print "In trace_begin:\n"

    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """perf hook, called once per sample: turn it into an event row."""
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved
    if (param_dict.has_key("dso")):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"

    if (param_dict.has_key("symbol")):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"

    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Insert *event* into the table matching its type."""
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the top bit so the values fit sqlite's signed 64-bit INTEGER
        # — presumably; confirm against the PEBS record layout.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """perf hook, called after the last sample: report and close the DB."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Return a '#' histogram bar of length floor(log2(num)) + 1.

    num must be >= 1; every count therefore gets at least one '#'.
    A log2 scale is used because event counts can be very large.
    """
    # int(...) instead of the original C-style (int)(...) cast; same value.
    return '#' * int(math.log(num, 2) + 1)
def show_general_events():
    """Print record count and comm/symbol/dso histograms for gen_events."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print record count and comm/symbol/dse/latency histograms for pebs_ll."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency (ascending latency, unlike the other groupings)
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """perf-script hook: dump any event with no dedicated handler as k=v pairs."""
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
czhengsci/pymatgen | pymatgen/electronic_structure/tests/test_boltztrap.py | 4 | 16912 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
import json
import warnings
from pymatgen.electronic_structure.bandstructure import BandStructure
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer, \
BoltztrapRunner
from pymatgen.electronic_structure.core import Spin, OrbitalType
from monty.serialization import loadfn
from monty.os.path import which
# Optional dependencies: tests that need them are skipped when absent.
try:
    from ase.io.cube import read_cube
except ImportError:
    read_cube = None
try:
    import fdint
except ImportError:
    fdint = None
# BoltzTraP's x_trans executable must be on PATH, otherwise the whole
# test class below is skipped.
x_trans = which("x_trans")
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapAnalyzerTest(unittest.TestCase):
    """Regression tests for BoltztrapAnalyzer/BoltztrapRunner.

    All expected values are numeric snapshots of previously validated
    BoltzTraP output shipped in test_files/boltztrap/.
    """
    def setUp(self):
        """Load pre-computed BoltzTraP runs and a reference band structure."""
        self.bz = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/transp/"))
        self.bz_bands = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/bands/"))
        self.bz_up = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/dos_up/"), dos_spin=1)
        self.bz_dw = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/dos_dw/"), dos_spin=-1)
        self.bz_fermi = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/fermi/"))
        with open(os.path.join(test_dir, "Cu2O_361_bandstructure.json"),
                  "rt") as f:
            d = json.load(f)
            self.bs = BandStructure.from_dict(d)
            self.btr = BoltztrapRunner(self.bs, 1)
        warnings.simplefilter("ignore")
    def tearDown(self):
        warnings.resetwarnings()
    def test_properties(self):
        """Spot-check raw transport tensors, doping maps, DOS and Fermi data."""
        self.assertAlmostEqual(self.bz.gap, 1.6644932121620404, 4)
        array = self.bz._cond[300][102]
        self.assertAlmostEqual(array[0][0] / 1e19, 7.5756518, 4)
        self.assertAlmostEqual(array[0][2], -11.14679)
        self.assertAlmostEqual(array[1][0], -88.203286)
        self.assertAlmostEqual(array[2][2], 1.7133249e+19)
        array = self.bz._seebeck[300][22]
        self.assertAlmostEqual(array[0][1], 6.4546074e-22)
        self.assertAlmostEqual(array[1][1], -0.00032073711)
        self.assertAlmostEqual(array[1][2], -2.9868424e-24)
        self.assertAlmostEqual(array[2][2], -0.0003126543)
        array = self.bz._kappa[500][300]
        self.assertAlmostEqual(array[0][1], 0.00014524309)
        self.assertAlmostEqual(array[1][1], 328834400000000.0)
        self.assertAlmostEqual(array[1][2], 3.7758069e-05)
        self.assertAlmostEqual(array[2][2], 193943750000000.0)
        self.assertAlmostEqual(self.bz._hall[400][800][1][0][0], 9.5623749e-28)
        self.assertAlmostEqual(self.bz._hall[400][68][1][2][2], 6.5106975e-10)
        self.assertAlmostEqual(self.bz.doping['p'][3], 1e18)
        self.assertAlmostEqual(self.bz.mu_doping['p'][300][2], 0.1553770018406)
        self.assertAlmostEqual(self.bz.mu_doping['n'][300][-1],
                               1.6486017632924719, 4)
        self.assertAlmostEqual(self.bz._cond_doping['n'][800][3][1][1] / 1e16,
                               1.5564085, 4)
        self.assertAlmostEqual(self.bz._seebeck_doping['p'][600][2][0][
                                   1] / 1e-23, 3.2860613, 4)
        self.assertAlmostEqual(self.bz._carrier_conc[500][67], 38.22832002)
        self.assertAlmostEqual(self.bz.vol, 612.97557323964838, 4)
        self.assertAlmostEqual(self.bz.intrans["scissor"], 0.0, 1)
        self.assertAlmostEqual(self.bz._hall_doping['n'][700][-1][2][2][2],
                               5.0136483e-26)
        self.assertAlmostEqual(self.bz.dos.efermi, -0.0300005507057)
        self.assertAlmostEqual(self.bz.dos.energies[0], -2.4497049391830448, 4)
        self.assertAlmostEqual(self.bz.dos.energies[345],
                               -0.72708823447130944, 4)
        self.assertAlmostEqual(self.bz.dos.energies[-1], 3.7569398770153524, 4)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][400], 118.70171)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][200], 179.58562)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][300], 289.43945)
        self.assertAlmostEqual(self.bz_bands._bz_bands.shape, (1316, 20))
        self.assertAlmostEqual(self.bz_bands._bz_kpoints.shape, (1316, 3))
        self.assertAlmostEqual(self.bz_up._dos_partial['0']['pz'][2562],
                               0.023862958)
        self.assertAlmostEqual(self.bz_dw._dos_partial['1']['px'][3120],
                               5.0192891)
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data.shape,
                               (121, 121, 65))
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data[21][79][19],
                               -1.8831911809439161, 5)
    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_seebeck_eff_mass(self):
        """Seebeck effective mass in tensor/average form, by mu and doping."""
        ref = [1.956090529381193, 2.0339311618566343, 1.1529383757896965]
        ref2 = [4258.4072823354145, 4597.0351887125289, 4238.1262696392705]
        sbk_mass_tens_mu = \
            self.bz.get_seebeck_eff_mass(output='tensor', doping_levels=False,
                                         temp=300)[3]
        sbk_mass_tens_dop = \
            self.bz.get_seebeck_eff_mass(output='tensor', doping_levels=True,
                                         temp=300)['n'][2]
        sbk_mass_avg_mu = \
            self.bz.get_seebeck_eff_mass(output='average', doping_levels=False,
                                         temp=300)[3]
        sbk_mass_avg_dop = \
            self.bz.get_seebeck_eff_mass(output='average', doping_levels=True,
                                         temp=300)['n'][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 1)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 4361.4744008038842, 1)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.661553842105382, 4)
    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_complexity_factor(self):
        """Fermi-surface complexity factor in tensor/average form."""
        ref = [2.7658776815227828, 2.9826088215568403, 0.28881335881640308]
        ref2 = [0.0112022048620205, 0.0036001049607186602,
                0.0083028947173193028]
        sbk_mass_tens_mu = \
            self.bz.get_complexity_factor(output='tensor', doping_levels=False,
                                          temp=300)[3]
        sbk_mass_tens_dop = \
            self.bz.get_complexity_factor(output='tensor', doping_levels=True,
                                          temp=300)['n'][2]
        sbk_mass_avg_mu = \
            self.bz.get_complexity_factor(output='average', doping_levels=False,
                                          temp=300)[3]
        sbk_mass_avg_dop = \
            self.bz.get_complexity_factor(output='average', doping_levels=True,
                                          temp=300)['n'][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 4)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 0.00628677029221, 4)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.12322832119, 4)
    def test_get_seebeck(self):
        """Seebeck coefficient lookups (eigenvalues and averages)."""
        ref = [-768.99078999999995, -724.43919999999991, -686.84682999999973]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_seebeck()['n'][800][3][i],
                                   ref[i])
        self.assertAlmostEqual(
            self.bz.get_seebeck(output='average')['p'][800][3], 697.608936667)
        self.assertAlmostEqual(
            self.bz.get_seebeck(output='average', doping_levels=False)[500][
                520], 1266.7056)
        self.assertAlmostEqual(
            self.bz.get_seebeck(output='average', doping_levels=False)[300][65],
            -36.2459389333)  # TODO: this was originally "eigs"
    def test_get_conductivity(self):
        """Electrical conductivity lookups, with and without relaxation time."""
        ref = [5.9043185000000022, 17.855599000000002, 26.462935000000002]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_conductivity()['p'][600][2][i],
                                   ref[i])
        self.assertAlmostEqual(
            self.bz.get_conductivity(output='average')['n'][700][1],
            1.58736609667)
        self.assertAlmostEqual(
            self.bz.get_conductivity(output='average', doping_levels=False)[
                300][457], 2.87163566667)
        self.assertAlmostEqual(
            self.bz.get_conductivity(output='average', doping_levels=False,
                                     # TODO: this was originally "eigs"
                                     relaxation_time=1e-15)[200][63],
            16573.0536667)
    def test_get_power_factor(self):
        """Power factor (S^2*sigma) lookups."""
        ref = [6.2736602345523362, 17.900184232304138, 26.158282220458144]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_power_factor()['p'][200][2][i],
                                   ref[i])
        self.assertAlmostEqual(
            self.bz.get_power_factor(output='average')['n'][600][4],
            411.230962976)
        self.assertAlmostEqual(
            self.bz.get_power_factor(output='average', doping_levels=False,
                                     relaxation_time=1e-15)[500][459],
            6.59277148467)
        self.assertAlmostEqual(
            self.bz.get_power_factor(output='average', doping_levels=False)[
                800][61], 2022.67064134)  # TODO: this was originally "eigs"
    def test_get_thermal_conductivity(self):
        """Electronic thermal conductivity, with/without the electronic part."""
        ref = [2.7719565628862623e-05, 0.00010048046886793946,
               0.00015874549392499391]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_thermal_conductivity()['p'][300][2][i], ref[i])
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             relaxation_time=1e-15)['n'][500][
                0],
            1.74466575612e-07)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             doping_levels=False)[800][874],
            8.08066254813)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             doping_levels=False)[200][32],
            # TODO: this was originally "eigs"
            0.0738961845832)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(k_el=False, output='average',
                                             doping_levels=False)[200][32],
            0.19429052)
    def test_get_zt(self):
        """Figure of merit ZT, averages and eigenvalues."""
        ref = [0.097408810215, 0.29335112354, 0.614673998089]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_zt()['n'][800][4][i], ref[i])
        self.assertAlmostEqual(
            self.bz.get_zt(output='average', kl=0.5)['p'][700][2],
            0.0170001879916)
        self.assertAlmostEqual(
            self.bz.get_zt(output='average', doping_levels=False,
                           relaxation_time=1e-15)[300][240],
            0.0041923533238348342)
        eigs = self.bz.get_zt(output='eigs', doping_levels=False)[700][65]
        ref_eigs = [0.082420053399668847, 0.29408035502671648,
                    0.40822061215079392]
        for idx, val in enumerate(ref_eigs):
            self.assertAlmostEqual(eigs[idx], val, 5)
    def test_get_average_eff_mass(self):
        """Average effective mass, eigenvalue/tensor/scalar outputs."""
        ref = [0.76045816788363574, 0.96181142990667101, 2.9428428773308628]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass()['p'][300][2][i], ref[i])
        ref = [1.1295783824744523, 1.3898454041924351, 5.2459984671977935]
        ref2 = [6.6648842712692078, 31.492540105738343, 37.986369302138954]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass()['n'][600][1][i], ref[i])
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass(doping_levels=False)[300][200][i],
                ref2[i])
        ref = [[9.61811430e-01, -8.25159596e-19, -4.70319444e-19],
               [-8.25159596e-19, 2.94284288e+00, 3.00368916e-18],
               [-4.70319444e-19, 3.00368916e-18, 7.60458168e-01]]
        ref2 = [[2.79760445e+01, -2.39347589e-17, -1.36897140e-17],
                [-2.39347589e-17, 8.55969097e+01, 8.74169648e-17],
                [-1.36897140e-17, 8.74169648e-17, 2.21151980e+01]]
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output='tensor')['p'][300][2][
                        i][j], ref[i][j])
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output='tensor',
                                                 doping_levels=False)[300][500][
                        i][j], ref2[i][j])
        self.assertAlmostEqual(
            self.bz.get_average_eff_mass(output='average')['n'][300][2],
            1.53769093989)
    def test_get_carrier_concentration(self):
        """Carrier concentration vs chemical potential."""
        self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][39] /
                               1e22, 6.4805156617179151, 4)
        self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][
                                   693] / 1e15, -6.590800965604750, 4)
    def test_get_hall_carrier_concentration(self):
        """Hall carrier concentration vs chemical potential."""
        self.assertAlmostEqual(self.bz.get_hall_carrier_concentration()[600][
                                   120] / 1e21, 6.773394626767555, 4)
        self.assertAlmostEqual(self.bz.get_hall_carrier_concentration()[500][
                                   892] / 1e21, -9.136803845741777, 4)
    def test_get_symm_bands(self):
        """Reconstruct symmetry-line bands with several kpoint/label inputs."""
        structure = loadfn(
            os.path.join(test_dir, 'boltztrap/structure_mp-12103.json'))
        sbs = loadfn(os.path.join(test_dir, 'boltztrap/dft_bs_sym_line.json'))
        kpoints = [kp.frac_coords for kp in sbs.kpoints]
        labels_dict = {k: sbs.labels_dict[k].frac_coords for k in
                       sbs.labels_dict}
        for kpt_line, labels_dict in zip([None, sbs.kpoints, kpoints],
                                         [None, sbs.labels_dict, labels_dict]):
            sbs_bzt = self.bz_bands.get_symm_bands(structure, -5.25204548,
                                                   kpt_line=kpt_line,
                                                   labels_dict=labels_dict)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up]), 20)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up][1]), 143)
    # def test_check_acc_bzt_bands(self):
    #     structure = loadfn(os.path.join(test_dir,'boltztrap/structure_mp-12103.json'))
    #     sbs = loadfn(os.path.join(test_dir,'boltztrap/dft_bs_sym_line.json'))
    #     sbs_bzt = self.bz_bands.get_symm_bands(structure,-5.25204548)
    #     corr,werr_vbm,werr_cbm,warn = BoltztrapAnalyzer.check_acc_bzt_bands(sbs_bzt,sbs)
    #     self.assertAlmostEqual(corr[2],9.16851750e-05)
    #     self.assertAlmostEqual(werr_vbm['K-H'],0.18260273521047862)
    #     self.assertAlmostEqual(werr_cbm['M-K'],0.071552669981356981)
    #     self.assertFalse(warn)
    def test_get_complete_dos(self):
        """Merge spin-up and spin-down runs into a CompleteDos."""
        structure = loadfn(
            os.path.join(test_dir, 'boltztrap/structure_mp-12103.json'))
        cdos = self.bz_up.get_complete_dos(structure, self.bz_dw)
        spins = list(cdos.densities.keys())
        self.assertIn(Spin.down, spins)
        self.assertIn(Spin.up, spins)
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.p].densities[Spin.up][3134],
            43.839230100999991)
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.s].densities[Spin.down][716],
            6.5383268000000001)
    def test_extreme(self):
        """get_extreme() min/max search with filters on temp and doping."""
        x = self.bz.get_extreme("seebeck")
        self.assertEqual(x["best"]["carrier_type"], "n")
        self.assertAlmostEqual(x["p"]["value"], 1255.365, 2)
        self.assertEqual(x["n"]["isotropic"], True)
        self.assertEqual(x["n"]["temperature"], 600)
        x = self.bz.get_extreme("kappa", maximize=False, min_temp=400,
                                min_doping=1E20)
        self.assertAlmostEqual(x["best"]["value"], 0.105, 2)
        self.assertAlmostEqual(x["n"]["value"], 0.139, 2)
        self.assertEqual(x["p"]["temperature"], 400)
        self.assertEqual(x["n"]["isotropic"], False)
    def test_to_from_dict(self):
        """BoltztrapRunner serializes to a JSON-compatible dict."""
        btr_dict = self.btr.as_dict()
        s = json.dumps(btr_dict)
        self.assertIsNotNone(s)
        self.assertIsNotNone(btr_dict['bs'])
# Allow running this test module directly: `python test_boltztrap.py`.
if __name__ == '__main__':
    unittest.main()
| mit |
varunarya10/boto | boto/glacier/__init__.py | 145 | 1685 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
    """
    Get all available regions for the Amazon Glacier service.
    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported lazily: boto.glacier.layer2 itself imports this package,
    # so a module-level import would be circular.
    from boto.glacier.layer2 import Layer2 as _Layer2
    return get_regions('glacier', connection_cls=_Layer2)
def connect_to_region(region_name, **kw_params):
    """Return a connection to the Glacier endpoint named `region_name`.

    Extra keyword arguments are forwarded to the region's connect() call.
    Returns None when no region with that name exists.
    """
    match = next((r for r in regions() if r.name == region_name), None)
    return None if match is None else match.connect(**kw_params)
| mit |
jk1/intellij-community | python/helpers/coveragepy/coverage/config.py | 39 | 12763 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Config file for coverage.py"""
import collections
import os
import re
import sys
from coverage.backward import configparser, iitems, string_class
from coverage.misc import contract, CoverageException, isolate_module
# Rebind os through isolate_module -- presumably shields coverage.py from
# monkey-patching of os by the code under measurement; TODO confirm.
os = isolate_module(os)
class HandyConfigParser(configparser.RawConfigParser):
    """Our specialization of ConfigParser.

    Adds a section-name prefix (so coverage options can live under prefixed
    sections of a shared config file), environment-variable expansion in
    values, and list/regex-list value readers.
    """
    def __init__(self, section_prefix):
        configparser.RawConfigParser.__init__(self)
        # Prepended to every section name before any lookup below.
        self.section_prefix = section_prefix
    def read(self, filename):
        """Read a file name as UTF-8 configuration data."""
        kwargs = {}
        if sys.version_info >= (3, 2):
            kwargs['encoding'] = "utf-8"
        return configparser.RawConfigParser.read(self, filename, **kwargs)
    def has_option(self, section, option):
        section = self.section_prefix + section
        return configparser.RawConfigParser.has_option(self, section, option)
    def has_section(self, section):
        section = self.section_prefix + section
        return configparser.RawConfigParser.has_section(self, section)
    def options(self, section):
        section = self.section_prefix + section
        return configparser.RawConfigParser.options(self, section)
    def get_section(self, section):
        """Get the contents of a section, as a dictionary."""
        d = {}
        for opt in self.options(section):
            d[opt] = self.get(section, opt)
        return d
    def get(self, section, *args, **kwargs):
        """Get a value, replacing environment variables also.
        The arguments are the same as `RawConfigParser.get`, but in the found
        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
        environment variable ``WORD``.
        Returns the finished value.
        """
        section = self.section_prefix + section
        v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
        def dollar_replace(m):
            """Called for each $replacement."""
            # Only one of the groups will have matched, just get its text.
            word = next(w for w in m.groups() if w is not None)  # pragma: part covered
            if word == "$":
                # "$$" escapes a literal dollar sign.
                return "$"
            else:
                # Missing environment variables expand to the empty string.
                return os.environ.get(word, '')
        dollar_pattern = r"""(?x)   # Use extended regex syntax
            \$(?:                   # A dollar sign, then
            (?P<v1>\w+) |           #   a plain word,
            {(?P<v2>\w+)} |         #   or a {-wrapped word,
            (?P<char>[$])           #   or a dollar sign.
            )
            """
        v = re.sub(dollar_pattern, dollar_replace, v)
        return v
    def getlist(self, section, option):
        """Read a list of strings.
        The value of `section` and `option` is treated as a comma- and newline-
        separated list of strings.  Each value is stripped of whitespace.
        Returns the list of strings.
        """
        value_list = self.get(section, option)
        values = []
        for value_line in value_list.split('\n'):
            for value in value_line.split(','):
                value = value.strip()
                if value:
                    values.append(value)
        return values
    def getregexlist(self, section, option):
        """Read a list of full-line regexes.
        The value of `section` and `option` is treated as a newline-separated
        list of regexes.  Each value is stripped of whitespace.
        Returns the list of strings.
        """
        line_list = self.get(section, option)
        value_list = []
        for value in line_list.splitlines():
            value = value.strip()
            try:
                # Validate each regex eagerly so bad config fails with a
                # clear message instead of a late re.error elsewhere.
                re.compile(value)
            except re.error as e:
                raise CoverageException(
                    "Invalid [%s].%s value %r: %s" % (section, option, value, e)
                )
            if value:
                value_list.append(value)
        return value_list
# The default line exclusion regexes.  (?i) makes the pragma match
# case-insensitively.
DEFAULT_EXCLUDE = [
    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
    ]
# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
    ]
# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
    'while (True|1|False|0):',
    'if (True|1|False|0):',
    ]
class CoverageConfig(object):
    """Coverage.py configuration.
    The attributes of this class are the various settings that control the
    operation of coverage.py.
    """
    def __init__(self):
        """Initialize the configuration attributes to their defaults."""
        # Metadata about the config.
        self.attempted_config_files = []
        self.config_files = []
        # Defaults for [run]
        self.branch = False
        self.concurrency = None
        self.cover_pylib = False
        self.data_file = ".coverage"
        self.debug = []
        self.note = None
        self.parallel = False
        self.plugins = []
        self.source = None
        self.timid = False
        # Defaults for [report]
        self.exclude_list = DEFAULT_EXCLUDE[:]
        self.fail_under = 0
        self.ignore_errors = False
        self.include = None
        self.omit = None
        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
        self.partial_list = DEFAULT_PARTIAL[:]
        self.precision = 0
        self.show_missing = False
        self.skip_covered = False
        # Defaults for [html]
        self.extra_css = None
        self.html_dir = "htmlcov"
        self.html_title = "Coverage report"
        # Defaults for [xml]
        self.xml_output = "coverage.xml"
        self.xml_package_depth = 99
        # Defaults for [paths]
        self.paths = {}
        # Options for plugins
        self.plugin_options = {}
    # Options that may arrive in kwargs as a single string but must be
    # stored as lists.
    MUST_BE_LIST = ["omit", "include", "debug", "plugins", "concurrency"]
    def from_args(self, **kwargs):
        """Read config values from `kwargs`."""
        for k, v in iitems(kwargs):
            if v is not None:
                if k in self.MUST_BE_LIST and isinstance(v, string_class):
                    v = [v]
                setattr(self, k, v)
    @contract(filename=str)
    def from_file(self, filename, section_prefix=""):
        """Read configuration from a .rc file.
        `filename` is a file name to read.
        Returns True or False, whether the file could be read.
        """
        self.attempted_config_files.append(filename)
        cp = HandyConfigParser(section_prefix)
        try:
            files_read = cp.read(filename)
        except configparser.Error as err:
            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
        if not files_read:
            return False
        self.config_files.extend(files_read)
        try:
            for option_spec in self.CONFIG_FILE_OPTIONS:
                self._set_attr_from_config_option(cp, *option_spec)
        except ValueError as err:
            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
        # Check that there are no unrecognized options.
        all_options = collections.defaultdict(set)
        for option_spec in self.CONFIG_FILE_OPTIONS:
            section, option = option_spec[1].split(":")
            all_options[section].add(option)
        for section, options in iitems(all_options):
            if cp.has_section(section):
                for unknown in set(cp.options(section)) - options:
                    if section_prefix:
                        section = section_prefix + section
                    raise CoverageException(
                        "Unrecognized option '[%s] %s=' in config file %s" % (
                            section, unknown, filename
                        )
                    )
        # [paths] is special
        if cp.has_section('paths'):
            for option in cp.options('paths'):
                self.paths[option] = cp.getlist('paths', option)
        # plugins can have options
        for plugin in self.plugins:
            if cp.has_section(plugin):
                self.plugin_options[plugin] = cp.get_section(plugin)
        return True
    CONFIG_FILE_OPTIONS = [
        # These are *args for _set_attr_from_config_option:
        #   (attr, where, type_="")
        #
        #   attr is the attribute to set on the CoverageConfig object.
        #   where is the section:name to read from the configuration file.
        #   type_ is the optional type to apply, by using .getTYPE to read the
        #       configuration value from the file.
        # [run]
        ('branch', 'run:branch', 'boolean'),
        ('concurrency', 'run:concurrency', 'list'),
        ('cover_pylib', 'run:cover_pylib', 'boolean'),
        ('data_file', 'run:data_file'),
        ('debug', 'run:debug', 'list'),
        ('include', 'run:include', 'list'),
        ('note', 'run:note'),
        ('omit', 'run:omit', 'list'),
        ('parallel', 'run:parallel', 'boolean'),
        ('plugins', 'run:plugins', 'list'),
        ('source', 'run:source', 'list'),
        ('timid', 'run:timid', 'boolean'),
        # [report]
        ('exclude_list', 'report:exclude_lines', 'regexlist'),
        ('fail_under', 'report:fail_under', 'int'),
        ('ignore_errors', 'report:ignore_errors', 'boolean'),
        ('include', 'report:include', 'list'),
        ('omit', 'report:omit', 'list'),
        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
        ('partial_list', 'report:partial_branches', 'regexlist'),
        ('precision', 'report:precision', 'int'),
        ('show_missing', 'report:show_missing', 'boolean'),
        ('skip_covered', 'report:skip_covered', 'boolean'),
        ('sort', 'report:sort'),
        # [html]
        ('extra_css', 'html:extra_css'),
        ('html_dir', 'html:directory'),
        ('html_title', 'html:title'),
        # [xml]
        ('xml_output', 'xml:output'),
        ('xml_package_depth', 'xml:package_depth', 'int'),
        ]
    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
        """Set an attribute on self if it exists in the ConfigParser."""
        section, option = where.split(":")
        if cp.has_option(section, option):
            # Dispatch to cp.get / cp.getboolean / cp.getlist / etc.
            method = getattr(cp, 'get' + type_)
            setattr(self, attr, method(section, option))
    def get_plugin_options(self, plugin):
        """Get a dictionary of options for the plugin named `plugin`."""
        return self.plugin_options.get(plugin, {})
    def set_option(self, option_name, value):
        """Set an option in the configuration.
        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.
        `value` is the new value for the option.
        """
        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                setattr(self, attr, value)
                return
        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            self.plugin_options.setdefault(plugin_name, {})[key] = value
            return
        # If we get here, we didn't find the option.
        raise CoverageException("No such option: %r" % option_name)
    def get_option(self, option_name):
        """Get an option from the configuration.
        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.
        Returns the value of the option.
        """
        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                return getattr(self, attr)
        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            return self.plugin_options.get(plugin_name, {}).get(key)
        # If we get here, we didn't find the option.
        raise CoverageException("No such option: %r" % option_name)
| apache-2.0 |
sirikata/sirikata | tools/space/test_deploy/server.py | 1 | 2562 | #!/usr/bin/env python
import subprocess
import os.path
# This script manages running a single server instance given some
# configuration information. It will usually be driven by a parent
# script which deals with common configuration and generating
# per-instance configuration options.
def AddStandardParams(appname, idx, args, env, **kwargs):
    """Prepend debugger/terminal wrapper commands to `args` and set heap
    profiling environment variables in `env`, in place, based on kwargs.

    Recognized kwargs: with_xterm, debug, valgrind, heapcheck,
    heapprofile, heapprofile_interval.  `heapcheck` takes precedence over
    `heapprofile` when both are given.
    """
    if kwargs.get('with_xterm'):
        args.extend(['gnome-terminal', '-x'])
    if kwargs.get('debug'):
        args.extend(['gdb', '-ex', 'run', '--args'])
    if kwargs.get('valgrind'):
        args.append('valgrind')
    heapcheck = kwargs.get('heapcheck')
    heapprofile = kwargs.get('heapprofile')
    if heapcheck:
        # Value should be the type of checking: minimal, normal,
        # strict, draconian
        env['HEAPCHECK'] = heapcheck
    elif heapprofile:
        # Value should be something like '/tmp' to indicate where heap
        # profiling samples are stored, e.g. /tmp/appname.1.0001.heap, ...
        env['HEAPPROFILE'] = '{0}/{1}.{2}'.format(heapprofile, appname, idx)
        interval = kwargs.get('heapprofile_interval')
        if interval:
            env['HEAP_PROFILE_ALLOCATION_INTERVAL'] = str(interval)
def GetAppFile(appname, **kwargs):
    """Return the path to the binary `appname`.

    Uses kwargs['sirikata_path'] as the install directory when it is set
    and non-empty; otherwise assumes the binary lives in the current
    working directory.
    """
    install_dir = kwargs.get('sirikata_path')
    if not install_dir:
        return './' + appname
    return os.path.join(install_dir, appname)
def RunApp(appname, idx, args, **kwargs):
    """Launch one instance of `appname` (instance index `idx`) via subprocess.

    Builds the full command line from the wrapper prefixes
    (AddStandardParams), the binary path (GetAppFile), the caller's args,
    and optional 'plugins' / 'save_log' kwargs.  Returns the Popen object.
    """
    full_args = []
    full_env = {}
    AddStandardParams(appname, idx, full_args, full_env, **kwargs)
    full_args.extend([GetAppFile(appname, **kwargs)])
    full_args.extend(args)
    if 'plugins' in kwargs and kwargs['plugins'] is not None:
        full_args.extend([ kwargs['plugins'] ])
    if 'save_log' in kwargs and kwargs['save_log']:
        # Per-instance log file, e.g. <save_log>/space_d-1.log
        lf = os.path.join(kwargs['save_log'], appname + '-' + str(idx) + '.log')
        full_args.extend(['--log-file=%s' % (lf)])
    print 'Running:', full_args, 'with environment:', full_env
    # Clear full_env if its empty -- env=None makes Popen inherit ours.
    if not full_env: full_env = None
    return subprocess.Popen(full_args, env=full_env)
def RunPinto(args, **kwargs):
    """Launch the pinto_d binary (always instance index 0)."""
    return RunApp('pinto_d', 0, args, **kwargs)
def RunCSeg(args, **kwargs):
    """Launch the cseg_d binary (always instance index 0)."""
    return RunApp('cseg_d', 0, args, **kwargs)
def RunSpace(ssid, args, **kwargs):
    """Launch one space_d instance with space-server id `ssid` as its index."""
    return RunApp('space_d', ssid, args, **kwargs)
| bsd-3-clause |
dbcli/vcli | vcli/packages/vspecial/tests/dbutils.py | 17 | 1980 | import pytest
import psycopg2
import psycopg2.extras
# TODO: should this be somehow be divined from environment?
POSTGRES_USER, POSTGRES_HOST = 'postgres', 'localhost'
def db_connection(dbname=None):
    """Open an autocommit connection to the local test Postgres server.

    Connects as POSTGRES_USER on POSTGRES_HOST; `dbname` of None selects
    the driver's default database.
    """
    connection = psycopg2.connect(
        user=POSTGRES_USER, host=POSTGRES_HOST, database=dbname)
    connection.autocommit = True
    return connection
# Probe once at import time whether a local Postgres server is reachable;
# tests decorated with `dbtest` below are skipped when it is not.
try:
    conn = db_connection()
    CAN_CONNECT_TO_DB = True
    SERVER_VERSION = conn.server_version
except:
    CAN_CONNECT_TO_DB = False
    SERVER_VERSION = 0
# Decorator that skips a test unless the probe above succeeded.
dbtest = pytest.mark.skipif(
    not CAN_CONNECT_TO_DB,
    reason="Need a postgres instance at localhost accessible by user 'postgres'")
def create_db(dbname):
    """Create database `dbname`, ignoring the error if it already exists.

    Fixes two defects in the original: the `dbname` argument was silently
    ignored (the SQL hard-coded `_test_db`), and a bare `except:` swallowed
    every exception including KeyboardInterrupt.
    """
    with db_connection().cursor() as cur:
        try:
            # Identifiers can't be bound as query parameters; `dbname` only
            # ever comes from trusted test code, so interpolation is safe here.
            cur.execute('CREATE DATABASE "%s"' % dbname)
        except psycopg2.Error:
            # Most likely "database already exists" -- same best-effort
            # semantics as before, but only database errors are ignored.
            pass
def setup_db(conn):
    """Populate the connected database with the schemas, tables, views,
    composite type and functions exercised by the vspecial test-suite."""
    with conn.cursor() as cur:
        # schemas
        cur.execute('create schema schema1')
        cur.execute('create schema schema2')
        # tables
        cur.execute('create table tbl1(id1 integer, txt1 text)')
        cur.execute('create table tbl2(id2 integer, txt2 text)')
        cur.execute('create table schema1.s1_tbl1(id1 integer, txt1 text)')
        # views
        cur.execute('create view vw1 as select * from tbl1')
        cur.execute('''create view schema1.s1_vw1 as select * from
                    schema1.s1_tbl1''')
        # datatype
        cur.execute('create type foo AS (a int, b text)')
        # functions
        cur.execute('''create function func1() returns int language sql as
                    $$select 1$$''')
        cur.execute('''create function schema1.s1_func1() returns int language
                    sql as $$select 2$$''')
def teardown_db(conn):
    """Drop everything setup_db() created by cascading away all test
    schemas, then recreate an empty `public` schema."""
    with conn.cursor() as cur:
        cur.execute('''
            DROP SCHEMA public CASCADE;
            CREATE SCHEMA public;
            DROP SCHEMA IF EXISTS schema1 CASCADE;
            DROP SCHEMA IF EXISTS schema2 CASCADE''')
| bsd-3-clause |
abhishek-ram/pyas2 | pyas2/management/commands/runas2server.py | 1 | 3051 | from django.core.management.base import BaseCommand
from django.core.handlers.wsgi import WSGIHandler
from django.utils.translation import ugettext as _
from pyas2 import pyas2init
import pyas2
import os
class Command(BaseCommand):
help = _(u'Starts the PyAS2 server')
def handle(self, *args, **options):
try:
import cherrypy
from cherrypy import wsgiserver
except Exception:
raise ImportError(_(u'Dependency failure: cherrypy library is needed to start the as2 server'))
cherrypy.config.update({
'global': {
'log.screen': False,
'log.error_file': os.path.join(pyas2init.gsettings['log_dir'], 'cherrypy_error.log'),
'server.environment': pyas2init.gsettings['environment']
}
})
# cherrypy handling of static files
conf = {
'/': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': os.path.abspath(
os.path.dirname(pyas2.__file__))
}
}
servestaticfiles = cherrypy.tree.mount(None, '/static', conf)
# cherrypy handling of django
# was: servedjango = AdminMediaHandler(WSGIHandler())
# but django does not need the AdminMediaHandler in this setup. is much faster.
servedjango = WSGIHandler()
# cherrypy uses a dispatcher in order to handle the serving of
# static files and django.
dispatcher = wsgiserver.WSGIPathInfoDispatcher(
{'/': servedjango, '/static': servestaticfiles})
pyas2server = wsgiserver.CherryPyWSGIServer(
bind_addr=('0.0.0.0', pyas2init.gsettings['port']),
wsgi_app=dispatcher,
server_name='pyas2-webserver'
)
pyas2init.logger.log(
25, _(u'PyAS2 server running at port: "%s".' % pyas2init.gsettings['port']))
# handle ssl: cherrypy < 3.2 always uses pyOpenssl. cherrypy >= 3.2
# uses python buildin ssl (python >= 2.6 has buildin support for ssl).
ssl_certificate = pyas2init.gsettings['ssl_certificate']
ssl_private_key = pyas2init.gsettings['ssl_private_key']
if ssl_certificate and ssl_private_key:
if cherrypy.__version__ >= '3.2.0':
adapter_class = wsgiserver.get_ssl_adapter_class('builtin')
pyas2server.ssl_adapter = adapter_class(ssl_certificate, ssl_private_key)
else:
# but: pyOpenssl should be there!
pyas2server.ssl_certificate = ssl_certificate
pyas2server.ssl_private_key = ssl_private_key
pyas2init.logger.log(25, _(u'PyAS2 server uses ssl (https).'))
else:
pyas2init.logger.log(25, _(u'PyAS2 server uses plain http (no ssl).'))
# start the cherrypy webserver.
try:
pyas2server.start()
except KeyboardInterrupt:
pyas2server.stop()
| gpl-2.0 |
THESLA/Saber_Prototipe | node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| apache-2.0 |
helldorado/ansible | lib/ansible/modules/remote_management/ucs/ucs_vlans.py | 64 | 6941 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_vlans
short_description: Configures VLANs on Cisco UCS Manager
description:
- Configures VLANs on Cisco UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify VLANs are present and will create if needed.
- If C(absent), will verify VLANs are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name assigned to the VLAN.
- The VLAN name is case sensitive.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the VLAN is created.
required: yes
multicast_policy:
description:
- The multicast policy associated with this VLAN.
- This option is only valid if the Sharing Type field is set to None or Primary.
default: ''
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
- For upstream disjoint L2 networks, Cisco recommends that you choose common to create VLANs that apply to both fabrics.
choices: [common, A, B]
default: common
id:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- You cannot create VLANs with IDs from 4030 to 4047. This range of VLAN IDs is reserved.
- The VLAN IDs you specify must also be supported on the switch that you are using.
- VLANs in the LAN cloud and FCoE VLANs in the SAN cloud must have different IDs.
- Optional if state is absent.
required: yes
sharing:
description:
- The Sharing Type field.
- "Whether this VLAN is subdivided into private or secondary VLANs. This can be one of the following:"
- "none - This VLAN does not have any secondary or private VLANs. This is a regular VLAN."
- "primary - This VLAN can have one or more secondary VLANs, as shown in the Secondary VLANs area. This VLAN is a primary VLAN in the private VLAN domain."
- "isolated - This is a private VLAN associated with a primary VLAN. This VLAN is an Isolated VLAN."
- "community - This VLAN can communicate with other ports on the same community VLAN as well as the promiscuous port. This VLAN is a Community VLAN."
choices: [none, primary, isolated, community]
default: none
native:
description:
- Designates the VLAN as a native VLAN.
choices: ['yes', 'no']
default: 'no'
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure VLAN
ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
id: '2'
native: 'yes'
- name: Remove VLAN
ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
name=dict(type='str', required=True),
multicast_policy=dict(type='str', default=''),
fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
id=dict(type='str'),
sharing=dict(type='str', default='none', choices=['none', 'primary', 'isolated', 'community']),
native=dict(type='str', default='no', choices=['yes', 'no']),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
['state', 'present', ['id']],
],
)
ucs = UCSModule(module)
err = False
# UCSModule creation above verifies ucsmsdk is present and exits on failure, so additional imports are done below.
from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan
changed = False
try:
mo_exists = False
props_match = False
# dn is fabric/lan/net-<name> for common vlans or fabric/lan/[A or B]/net-<name> for A or B
dn_base = 'fabric/lan'
if module.params['fabric'] != 'common':
dn_base += '/' + module.params['fabric']
dn = dn_base + '/net-' + module.params['name']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
# mo must exist but all properties do not have to match
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
if mo_exists:
# check top-level mo props
kwargs = dict(id=module.params['id'])
kwargs['default_net'] = module.params['native']
kwargs['sharing'] = module.params['sharing']
kwargs['mcast_policy_name'] = module.params['multicast_policy']
if (mo.check_prop_match(**kwargs)):
props_match = True
if not props_match:
if not module.check_mode:
# create if mo does not already exist
mo = FabricVlan(
parent_mo_or_dn=dn_base,
name=module.params['name'],
id=module.params['id'],
default_net=module.params['native'],
sharing=module.params['sharing'],
mcast_policy_name=module.params['multicast_policy'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
| gpl-3.0 |
johnpbatty/python-neutronclient | neutronclient/tests/unit/test_cli20_network.py | 2 | 25072 | # All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from oslo_serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import network
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20NetworkJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20NetworkJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_network(self):
"""Create net: myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_with_unicode(self):
"""Create net: u'\u7f51\u7edc'."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = u'\u7f51\u7edc'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_tenant(self):
"""Create net: --tenant_id tenantid myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--tenant_id', 'tenantid', name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_network_provider_args(self):
"""Create net: with --provider arguments."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
# Test --provider attributes before network name
args = ['--provider:network_type', 'vlan',
'--provider:physical_network', 'physnet1',
'--provider:segmentation_id', '400', name]
position_names = ['provider:network_type',
'provider:physical_network',
'provider:segmentation_id', 'name']
position_values = ['vlan', 'physnet1', '400', name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_tags(self):
"""Create net: myname --tags a b."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, '--tags', 'a', 'b']
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_network_state(self):
"""Create net: --admin_state_down myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--admin_state_down', name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
# Test dashed options
args = ['--admin-state-down', name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
def test_create_network_vlan_transparent(self):
"""Create net: myname --vlan-transparent True."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--vlan-transparent', 'True', name]
vlantrans = {'vlan_transparent': 'True'}
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
**vlantrans)
def test_list_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(path, query),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_networks(self, cmd, detail=False, tags=(),
fields_1=(), fields_2=(), page_size=None,
sort_key=(), sort_dir=()):
resources = "networks"
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources(resources, cmd, detail, tags,
fields_1, fields_2, page_size=page_size,
sort_key=sort_key, sort_dir=sort_dir)
def test_list_nets_pagination(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources_with_pagination("networks", cmd)
def test_list_nets_sort(self):
"""list nets: --sort-key name --sort-key id --sort-dir asc
--sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['asc', 'desc'])
def test_list_nets_sort_with_keys_more_than_dirs(self):
"""list nets: --sort-key name --sort-key id --sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['desc'])
def test_list_nets_sort_with_dirs_more_than_keys(self):
"""list nets: --sort-key name --sort-dir desc --sort-dir asc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name'],
sort_dir=['desc', 'asc'])
def test_list_nets_limit(self):
"""list nets: -P."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, page_size=1000)
def test_list_nets_detail(self):
"""list nets: -D."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, True)
def test_list_nets_tags(self):
"""List nets: -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=['a', 'b'])
def test_list_nets_tags_with_unicode(self):
"""List nets: -- --tags u'\u7f51\u7edc'."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])
def test_list_nets_detail_tags(self):
"""List nets: -D -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, detail=True, tags=['a', 'b'])
def _test_list_nets_extend_subnets(self, data, expected):
def setup_list_stub(resources, data, query):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().AndReturn(self.client)
setup_list_stub('networks', data, '')
cmd.get_client().AndReturn(self.client)
filters = ''
for n in data:
for s in n['subnets']:
filters = filters + "&id=%s" % s
setup_list_stub('subnets',
[{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
{'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
{'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
query='fields=id&fields=cidr' + filters)
self.mox.ReplayAll()
args = []
cmd_parser = cmd.get_parser('list_networks')
parsed_args = cmd_parser.parse_args(args)
result = cmd.get_data(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_result = [x for x in result[1]]
self.assertEqual(len(_result), len(expected))
for res, exp in zip(_result, expected):
self.assertEqual(len(res), len(exp))
for a, b in zip(res, exp):
self.assertEqual(a, b)
def test_list_nets_extend_subnets(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
'mysubid3']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2',
'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_extend_subnets_no_subnet(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2', 'mysubid4 ')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_fields(self):
"""List nets: --fields a --fields b -- --fields c d."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_nets_columns(self, cmd, returned_body,
args=('-f', 'json')):
resources = 'networks'
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_columns(cmd, resources, returned_body, args=args)
def test_list_nets_defined_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body,
args=['-f', 'json', '-c', 'id'])
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(1, len(net))
self.assertIn("id", net.keys())
def test_list_nets_with_default_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body)
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(3, len(net))
self.assertEqual(0, len(set(net) ^ set(cmd.list_columns)))
def test_list_external_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "router%3Aexternal=True&id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_external_nets(self, resources, cmd,
detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
if query:
query += '&router%3Aexternal=True'
else:
query += 'router%3Aexternal=True'
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
def test_list_external_nets_detail(self):
"""list external nets: -D."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd, True)
def test_list_external_nets_tags(self):
"""List external nets: -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources,
cmd, tags=['a', 'b'])
def test_list_external_nets_detail_tags(self):
"""List external nets: -D -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
detail=True, tags=['a', 'b'])
def test_list_externel_nets_fields(self):
"""List external nets: --fields a --fields b -- --fields c d."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_network_exception(self):
"""Update net: myid."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_update_network(self):
"""Update net: myid --name myname --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_network_with_unicode(self):
"""Update net: myid --name u'\u7f51\u7edc' --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', u'\u7f51\u7edc',
'--tags', 'a', 'b'],
{'name': u'\u7f51\u7edc',
'tags': ['a', 'b'], }
)
def test_show_network(self):
"""Show net: --fields id --fields name myid."""
resource = 'network'
cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_delete_network(self):
"""Delete net: myid."""
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def _test_extend_list(self, mox_calls):
data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
'subnets': ['mysubid%d' % i]}
for i in range(10)]
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, 'subnets_path')
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
cmd.get_client().MultipleTimes().AndReturn(self.client)
mox_calls(path, data)
self.mox.ReplayAll()
known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
cmd.extend_list(data, known_args)
self.mox.VerifyAll()
def _build_test_data(self, data):
subnet_ids = []
response = []
filters = ""
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
for subnet_id in n['subnets']:
filters = "%s&id=%s" % (filters, subnet_id)
response.append({'id': subnet_id,
'cidr': '192.168.0.0/16'})
resp_str = self.client.serialize({'subnets': response})
resp = (test_cli20.MyResp(200), resp_str)
return filters, resp
def test_extend_list(self):
def mox_calls(path, data):
filters, response = self._build_test_data(data)
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(
path, 'fields=id&fields=cidr' + filters), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
def test_extend_list_exceed_max_uri_len(self):
def mox_calls(path, data):
sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
filters, response = self._build_test_data(data)
# 1 char of extra URI len will cause a split in 2 requests
self.mox.StubOutWithMock(self.client.httpclient,
"_check_uri_length")
self.client.httpclient._check_uri_length(mox.IgnoreArg()).AndRaise(
exceptions.RequestURITooLong(excess=1))
for data in sub_data_lists:
filters, response = self._build_test_data(data)
self.client.httpclient._check_uri_length(
mox.IgnoreArg()).AndReturn(None)
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(
path, 'fields=id&fields=cidr%s' % filters),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
class CLITestV20NetworkXML(CLITestV20NetworkJSON):
format = 'xml'
| apache-2.0 |
wjn740/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
talbrecht/pism_pik07 | examples/marine/circular/circular_ice_sheet.py | 2 | 2528 | #!/usr/bin/env python
# Copyright (C) 2012, 2013, 2014 Ricarda Winkelmann, Torsten Albrecht,
# Ed Bueler, and Constantine Khroulev
import numpy as np
import scipy.optimize as opt
import PISMNC
import piktests_utils
# command line arguments
options = piktests_utils.process_options("circular_withshelf.nc",
                                         domain_size=3600)
p = piktests_utils.Parameters()
# x, y are presumably 1-D coordinate vectors from the grid helper
# (indexed independently below) -- TODO confirm against piktests_utils.
dx, dy, x, y = piktests_utils.create_grid(options)
# Compute the calving front radius
x_cf = 1000.0 * options.domain_size / 3  # at 1200 km
y_cf = 1000.0 * options.domain_size / 3  # at 1200 km
r_cf = np.sqrt(x_cf ** 2 + y_cf ** 2)  # calving front position in m
# create arrays which will go in the output file
thk = np.zeros((options.My, options.Mx))  # sheet/shelf thickness
bed = np.zeros_like(thk)  # bedrock surface elevation
Ts = np.zeros_like(thk) + p.air_temperature
accum = np.zeros_like(thk) + p.accumulation_rate * p.rho_ice
def MISMIP_bed(r):
    """Return the MISMIP polynomial bedrock elevation (m) at radius r (m)."""
    s = r / 1000.0                      # work in kilometers
    b0 = 729.0
    b2 = -2184.80 / (750.0) ** 2
    b4 = 1031.72 / (750.0) ** 4
    b6 = -151.72 / (750.0) ** 6
    return b0 + b2 * s ** 2 + b4 * s ** 4 + b6 * s ** 6  # in m
def MISMIP_thk(r):
    """Ice thickness (m) at radius r (m): a quartic profile peaking at
    4000 m in the center and tapering to 200 m at the calving front r_cf."""
    front_thk = 200.0    # ice thickness at calving front
    center_thk = 4000.0  # maximal ice thickness in m
    drop = front_thk - center_thk
    return (-drop / r_cf ** 4) * r ** 4 + (2 * drop / r_cf ** 2) * r ** 2 + center_thk
# bedrock and ice thickness
# thk/bed are (My, Mx), so j indexes rows (y) and i columns (x).
for j in range(options.My):
    for i in range(options.Mx):
        radius = np.sqrt(x[i] ** 2 + y[j] ** 2)  # radius in m
        # set bedrock as in MISMIP experiment
        bed[j, i] = MISMIP_bed(radius)
        # set thickness (zero beyond the calving front)
        if radius <= r_cf:
            thk[j, i] = MISMIP_thk(radius)
# clip bed topography so it never drops below the model's minimum
bed[bed < p.topg_min] = p.topg_min
# Compute the grounding line radius
def f(x):
    """Floatation criterion rho_ice/rho_ocean * thk + bed; its root in
    radius is the grounding-line position."""
    density_ratio = p.rho_ice / p.rho_ocean
    return MISMIP_bed(x) + density_ratio * MISMIP_thk(x)
# Bisection between the center and the calving front finds the radius
# where the ice starts to float.
r_gl = opt.bisect(f, 0, r_cf)
print "grounding line radius = %.2f km" % (r_gl / 1000.0)
# Write the bootstrap file PISM will read.
ncfile = PISMNC.PISMDataset(options.output_filename, 'w', format='NETCDF3_CLASSIC')
piktests_utils.prepare_output_file(ncfile, x, y, include_vel_bc=False)
variables = {"thk": thk,
             "topg": bed,
             "ice_surface_temp": Ts,
             "climatic_mass_balance": accum}
piktests_utils.write_data(ncfile, variables)
ncfile.close()
print "Successfully created %s" % options.output_filename
| gpl-3.0 |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/dis.py | 270 | 6499 | """Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode import *
from opcode import __all__ as _opcodes_all
# Public API: this module's helpers plus everything re-exported from opcode.
__all__ = ["dis", "disassemble", "distb", "disco",
           "findlinestarts", "findlabels"] + _opcodes_all
del _opcodes_all
# Objects from which a code object can be extracted for disassembly.
_have_code = (types.MethodType, types.FunctionType, types.CodeType,
              types.ClassType, type)
def dis(x=None):
    """Disassemble classes, methods, functions, or code.
    With no argument, disassemble the last traceback.
    """
    if x is None:
        distb()
        return
    # Old-style-class instances: disassemble their class instead.
    if isinstance(x, types.InstanceType):
        x = x.__class__
    # Unwrap methods, then functions, down to the underlying code object.
    if hasattr(x, 'im_func'):
        x = x.im_func
    if hasattr(x, 'func_code'):
        x = x.func_code
    if hasattr(x, '__dict__'):
        # A class or module: disassemble every code-bearing attribute,
        # in sorted name order for stable output.
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if isinstance(x1, _have_code):
                print "Disassembly of %s:" % name
                try:
                    dis(x1)
                except TypeError, msg:
                    print "Sorry:", msg
                print
    elif hasattr(x, 'co_code'):
        disassemble(x)
    elif isinstance(x, str):
        # A raw bytecode string: no code-object metadata available.
        disassemble_string(x)
    else:
        raise TypeError, \
              "don't know how to disassemble %s objects" % \
              type(x).__name__
def distb(tb=None):
    """Disassemble a traceback (default: last traceback)."""
    if tb is None:
        try:
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError, "no last traceback to disassemble"
    # Walk to the innermost frame; tb_lasti marks the current instruction.
    while tb.tb_next: tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
    """Disassemble a code object.
    Prints one line per instruction: source line number (when it starts
    a line), a '-->' marker at offset *lasti*, '>>' on jump targets,
    the offset, the opname, and a decoded argument.
    """
    code = co.co_code
    labels = findlabels(code)
    linestarts = dict(findlinestarts(co))
    n = len(code)
    i = 0
    extended_arg = 0
    free = None
    while i < n:
        c = code[i]
        op = ord(c)
        if i in linestarts:
            if i > 0:
                print
            print "%3d" % linestarts[i],
        else:
            print ' ',
        if i == lasti: print '-->',
        else: print ' ',
        if i in labels: print '>>',
        else: print ' ',
        print repr(i).rjust(4),
        print opname[op].ljust(20),
        i = i+1
        if op >= HAVE_ARGUMENT:
            # Arguments are two little-endian bytes, possibly widened by a
            # preceding EXTENDED_ARG instruction.
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
            if op == EXTENDED_ARG:
                extended_arg = oparg*65536L
            print repr(oparg).rjust(5),
            if op in hasconst:
                print '(' + repr(co.co_consts[oparg]) + ')',
            elif op in hasname:
                print '(' + co.co_names[oparg] + ')',
            elif op in hasjrel:
                print '(to ' + repr(i + oparg) + ')',
            elif op in haslocal:
                print '(' + co.co_varnames[oparg] + ')',
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
            elif op in hasfree:
                # Cell/free variable names are resolved lazily, once.
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                print '(' + free[oparg] + ')',
        print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
                       constants=None):
    """Disassemble a raw bytecode string (no code object available).
    Optional name/constant tables may be supplied; otherwise bare
    argument indices are printed as '(N)'.
    """
    labels = findlabels(code)
    n = len(code)
    i = 0
    while i < n:
        c = code[i]
        op = ord(c)
        if i == lasti: print '-->',
        else: print ' ',
        if i in labels: print '>>',
        else: print ' ',
        print repr(i).rjust(4),
        print opname[op].ljust(15),
        i = i+1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256
            i = i+2
            print repr(oparg).rjust(5),
            if op in hasconst:
                if constants:
                    print '(' + repr(constants[oparg]) + ')',
                else:
                    print '(%d)'%oparg,
            elif op in hasname:
                if names is not None:
                    print '(' + names[oparg] + ')',
                else:
                    print '(%d)'%oparg,
            elif op in hasjrel:
                print '(to ' + repr(i + oparg) + ')',
            elif op in haslocal:
                if varnames:
                    print '(' + varnames[oparg] + ')',
                else:
                    print '(%d)' % oparg,
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
        print
disco = disassemble  # Backwards-compatible alias for the old public name.
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.
    Return the list of offsets.
    """
    labels = []
    n = len(code)
    i = 0
    while i < n:
        c = code[i]
        op = ord(c)
        i = i+1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256
            i = i+2
            label = -1
            # Relative jumps target i+oparg; absolute jumps target oparg.
            if op in hasjrel:
                label = i+oparg
            elif op in hasjabs:
                label = oparg
            if label >= 0:
                if label not in labels:
                    labels.append(label)
    return labels
def findlinestarts(code):
    """Find the offsets in a byte code which are start of lines in the source.
    Generate pairs (offset, lineno) as described in Python/compile.c.
    """
    # co_lnotab interleaves (byte delta, line delta) pairs.
    lnotab = code.co_lnotab
    byte_deltas = [ord(ch) for ch in lnotab[0::2]]
    line_deltas = [ord(ch) for ch in lnotab[1::2]]

    offset = 0
    line = code.co_firstlineno
    last_emitted = None
    for byte_delta, line_delta in zip(byte_deltas, line_deltas):
        if byte_delta:
            # A new bytecode range begins: report the line the previous
            # range belonged to, unless it was already reported.
            if line != last_emitted:
                yield (offset, line)
                last_emitted = line
            offset += byte_delta
        line += line_delta
    if line != last_emitted:
        yield (offset, line)
def _test():
    """Simple test program to disassemble a file."""
    if sys.argv[1:]:
        if sys.argv[2:]:
            sys.stderr.write("usage: python dis.py [-|file]\n")
            sys.exit(2)
        fn = sys.argv[1]
        if not fn or fn == "-":
            fn = None
    # NOTE: this 'else' pairs with the outer 'if sys.argv[1:]' above.
    else:
        fn = None
    if fn is None:
        f = sys.stdin
    else:
        f = open(fn)
    source = f.read()
    if fn is not None:
        f.close()
    else:
        fn = "<stdin>"
    code = compile(source, fn, "exec")
    dis(code)
if __name__ == "__main__":
    _test()
| lgpl-3.0 |
tornadozou/tensorflow | tensorflow/tools/ci_build/update_version.py | 10 | 12706 | #!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically update TensorFlow version in source files
#
# Usage:
# ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc0
# ./tensorflow/tools/ci_build/update_version.py --nightly
#
"""Update version of TensorFlow script."""
# pylint: disable=superfluous-parens
import argparse
import fileinput
import os
import re
import subprocess
import time
# File parameters: every path touched by a version bump, relative to the
# TensorFlow source root (the script must be run from there).
TF_SRC_DIR = "tensorflow"
VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel" % TF_SRC_DIR
GPU_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-gpu" % TF_SRC_DIR
RELEVANT_FILES = [TF_SRC_DIR,
                  VERSION_H,
                  SETUP_PY,
                  README_MD,
                  DEVEL_DOCKERFILE,
                  GPU_DEVEL_DOCKERFILE]
# Version type parameters
NIGHTLY_VERSION = 1
REGULAR_VERSION = 0
def replace_line(old_line, new_line, filename):
    """Rewrite *filename* in place, substituting every occurrence of
    old_line with new_line (trailing whitespace is stripped per line)."""
    # fileinput with inplace=True redirects stdout into the file itself,
    # so print() writes the rewritten line back.
    for text in fileinput.input(filename, inplace=True):
        print(text.rstrip().replace(old_line, new_line))
def check_existence(filename):
    """Check the existence of file or dir.

    Args:
      filename: path to check.

    Raises:
      RuntimeError: if *filename* does not exist.
    """
    if not os.path.exists(filename):
        # Bug fix: the original never applied '% filename', so the message
        # printed a literal '%s' instead of the missing path.
        raise RuntimeError("%s not found. Are you under the TensorFlow source root"
                           " directory?" % filename)
def check_all_files():
    """Verify every file/dir the upgrade touches actually exists."""
    for path in RELEVANT_FILES:
        check_existence(path)
def replace_with_sed(query, filename):
    """Run an in-place sed substitution for edits that need a regex."""
    command = ['sed', '-i', '-r', '-e', query, filename]
    subprocess.check_call(command)
class Version(object):
    """Version class object that stores SemVer version information."""

    def __init__(self, major, minor, patch, identifier_string, version_type):
        """Constructor.

        Args:
          major: major string eg. (1)
          minor: minor string eg. (3)
          patch: patch string eg. (1)
          identifier_string: extension string eg. (-rc0)
          version_type: version parameter ((REGULAR|NIGHTLY)_VERSION)
        """
        self.major = major
        self.minor = minor
        self.patch = patch
        self.identifier_string = identifier_string
        self.version_type = version_type
        self.string = "%s.%s.%s%s" % (major, minor, patch, identifier_string)

    def __str__(self):
        return self.string

    @property
    def pep_440_str(self):
        """PEP 440 rendering: dashes dropped; nightlies omit the patch."""
        if self.version_type == REGULAR_VERSION:
            raw = "%s.%s.%s%s" % (self.major, self.minor, self.patch,
                                  self.identifier_string)
        else:
            raw = "%s.%s.%s" % (self.major, self.minor,
                                self.identifier_string)
        return raw.replace("-", "")

    @staticmethod
    def parse_from_string(string, version_type):
        """Returns version object from Semver string.

        Args:
          string: version string
          version_type: version parameter

        Raises:
          RuntimeError: If the version string is not valid.
        """
        # Check validity of new version string
        if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string):
            raise RuntimeError("Invalid version string: %s" % string)
        major, minor, extension = string.split(".", 2)
        # Split "patch[-identifier]" into its two halves; sep+tail is ""
        # when there is no dash, "-<identifier>" otherwise.
        patch, sep, tail = extension.partition("-")
        identifier_string = sep + tail
        return Version(major, minor, patch, identifier_string, version_type)
def get_current_semver_version():
    """Returns a Version object of current version.

    Returns:
      version: Version object of current SemVer string based on information from
      core/public/version.h
    """
    # Scan version.h for the four #define lines. Use a context manager so
    # the file handle is always closed (the original leaked it).
    with open(VERSION_H, "r") as version_file:
        for line in version_file:
            major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line)
            minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line)
            patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line)
            extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line)
            if major_match:
                old_major = major_match.group(1)
            if minor_match:
                old_minor = minor_match.group(1)
            if patch_match:
                old_patch_num = patch_match.group(1)
            if extension_match:
                old_extension = extension_match.group(1)
                # The suffix is the last define of interest; stop scanning.
                break
    # A "-devYYYYMMDD" suffix marks a nightly build.
    if "dev" in old_extension:
        version_type = NIGHTLY_VERSION
    else:
        version_type = REGULAR_VERSION
    return Version(old_major,
                   old_minor,
                   old_patch_num,
                   old_extension,
                   version_type)
def update_version_h(old_version, new_version):
    """Update tensorflow/core/public/version.h."""
    # One (template, old value, new value) row per versioning macro; the
    # replacements run in the same order as before.
    rows = [
        ("TF_MAJOR_VERSION %s", old_version.major, new_version.major),
        ("TF_MINOR_VERSION %s", old_version.minor, new_version.minor),
        ("TF_PATCH_VERSION %s", old_version.patch, new_version.patch),
        ("TF_VERSION_SUFFIX \"%s\"", old_version.identifier_string,
         new_version.identifier_string),
    ]
    for template, old_val, new_val in rows:
        replace_line("#define " + template % old_val,
                     "#define " + template % new_val, VERSION_H)
def update_setup_dot_py(old_version, new_version):
    """Update the _VERSION constant in setup.py."""
    template = "_VERSION = '%s'"
    replace_line(template % old_version.string,
                 template % new_version.string, SETUP_PY)
def update_readme(old_version, new_version):
    """Update README."""
    # Rewrite "MAJOR.MINOR.<alnum>-" occurrences to the new PEP 440 string.
    new_pep = new_version.pep_440_str
    sed_query = r"s/%s\.%s\.([[:alnum:]]+)-/%s-/g" % (
        old_version.major, old_version.minor, new_pep)
    replace_with_sed(sed_query, README_MD)
def update_md_files(old_version, new_version):
    """Update the md doc files.

    Args:
      old_version: Version object of current version
      new_version: Version object of new version
    """
    old_pep = old_version.pep_440_str
    new_pep = new_version.pep_440_str
    # Install guides that reference pip package names / display versions.
    for doc in ["linux", "mac", "windows", "sources"]:
        path = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR, doc)
        for template in ("tensorflow-%s", "tensorflow_gpu-%s", "TensorFlow %s"):
            replace_with_sed("s/%s/%s/g" % (template % old_pep,
                                            template % new_pep), path)
    # Language bindings that embed the full SemVer string.
    for doc in ["java", "go", "c"]:
        path = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR, doc)
        replace_with_sed(r"s/x86_64-%s/x86_64-%s/g"
                         % (old_version, new_version), path)
        replace_with_sed(r"s/libtensorflow-%s.jar/libtensorflow-%s.jar/g"
                         % (old_version, new_version), path)
        replace_with_sed(r"s/<version>%s<\/version>/<version>%s<\/version>/g"
                         % (old_version, new_version), path)
def major_minor_change(old_version, new_version):
    """Check if a major or minor change occurred."""
    return (old_version.major != new_version.major or
            old_version.minor != new_version.minor)
def update_dockerfiles(old_version, new_version):
    """Update dockerfiles if there was a major change."""
    if not major_minor_change(old_version, new_version):
        return
    # Branch names look like "r1.4"; escape the dot for sed.
    old_pattern = r"r%s\.%s" % (old_version.major, old_version.minor)
    new_pattern = r"r%s\.%s" % (new_version.major, new_version.minor)
    print("Detected Major.Minor change.")
    print("Updating pattern %s to %s in additional files"
          % (old_pattern.replace("\\", ""), new_pattern.replace("\\", "")))
    # Point the devel Dockerfiles at the new release branch.
    for dockerfile in (DEVEL_DOCKERFILE, GPU_DEVEL_DOCKERFILE):
        replace_with_sed("s/%s/%s/g" % (old_pattern, new_pattern), dockerfile)
def check_for_lingering_string(lingering_string):
    """Grep the source tree for leftover copies of an old version string
    and warn the operator about each hit."""
    pattern = lingering_string.replace(".", r"\.")
    try:
        grep_output = subprocess.check_output(
            ["grep", "-rnoH", pattern, TF_SRC_DIR])
        hits = grep_output.decode("utf8").split("\n")
    except subprocess.CalledProcessError:
        # grep exits non-zero when nothing matches.
        hits = []
    if not hits:
        print("No lingering old version strings \"%s\" found in source directory"
              " \"%s/\". Good." % (lingering_string, TF_SRC_DIR))
        return
    print("WARNING: Below are potentially instances of lingering old version "
          "string \"%s\" in source directory \"%s/\" that are not "
          "updated by this script. Please check them manually!"
          % (lingering_string, TF_SRC_DIR))
    for hit in hits:
        print(hit)
def check_for_old_version(old_version, new_version):
    """Check for old version references."""
    for stale in (old_version.string, old_version.pep_440_str):
        check_for_lingering_string(stale)
    if major_minor_change(old_version, new_version):
        # Old release-branch names ("r1.4") also become stale.
        check_for_lingering_string("r%s.%s" % (old_version.major,
                                               old_version.minor))
def main():
    """This script updates all instances of version in the tensorflow directory.

    Requirements:
      version: The version tag
      OR
      nightly: Create a nightly tag with current date

    Raises:
      RuntimeError: If the script is not being run from tf source dir
    """
    cli = argparse.ArgumentParser(description="Cherry picking automation.")
    mode = cli.add_mutually_exclusive_group(required=True)
    # Arg information
    mode.add_argument("--version",
                      help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>",
                      default="")
    mode.add_argument("--nightly",
                      help="disable the service provisioning step",
                      action="store_true")
    args = cli.parse_args()

    check_all_files()
    old_version = get_current_semver_version()

    if args.nightly:
        # Nightlies run one minor version ahead of the official release.
        new_version = Version(old_version.major,
                              str(int(old_version.minor) + 1),
                              old_version.patch,
                              "-dev" + time.strftime("%Y%m%d"),
                              NIGHTLY_VERSION)
    else:
        new_version = Version.parse_from_string(args.version, REGULAR_VERSION)

    update_version_h(old_version, new_version)
    update_setup_dot_py(old_version, new_version)
    update_readme(old_version, new_version)
    update_md_files(old_version, new_version)
    update_dockerfiles(old_version, new_version)

    # Summarize the transition for the operator.
    print("Major: %s -> %s" % (old_version.major, new_version.major))
    print("Minor: %s -> %s" % (old_version.minor, new_version.minor))
    print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))

    check_for_old_version(old_version, new_version)


if __name__ == "__main__":
    main()
| apache-2.0 |
darkleons/BE | addons/l10n_fr/report/__init__.py | 424 | 1475 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
usc-isi/nova | nova/openstack/common/excutils.py | 21 | 1732 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import contextlib
import logging
import sys
import traceback
@contextlib.contextmanager
def save_and_reraise_exception():
    """Save current exception, run some code and then re-raise.
    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.
    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.
    """
    # Must be entered from inside an 'except' block: capture the live
    # exception before yielding control to the handler body.
    type_, value, tb = sys.exc_info()
    try:
        yield
    except Exception:
        # The handler body itself raised: log the exception we were
        # holding and let the new one propagate instead.
        logging.error('Original exception being dropped: %s' %
                      (traceback.format_exception(type_, value, tb)))
        raise
    # Python 2 three-argument raise: re-raise with the original traceback.
    raise type_, value, tb
| apache-2.0 |
vivek8943/django-chartit | chartit/utils.py | 7 | 1360 | from collections import defaultdict
def _convert_to_rdd(obj):
    """Accepts a dict or a list of dicts and converts it to a
    RecursiveDefaultDict."""
    if isinstance(obj, dict):
        converted = RecursiveDefaultDict()
        for key, value in obj.items():
            converted[key] = _convert_to_rdd(value)
        return converted
    if isinstance(obj, list):
        # Convert each element; non-dict leaves pass through unchanged.
        return [_convert_to_rdd(item) for item in obj]
    return obj
class RecursiveDefaultDict(defaultdict):
    """A defaultdict whose missing keys produce nested instances of the
    same type; plain dicts/lists assigned into it are converted too."""

    def __init__(self, data=None):
        self.default_factory = type(self)
        if data is not None:
            # Convert eagerly, then load the result into ourselves.
            self.update(_convert_to_rdd(data))

    def __getitem__(self, key):
        return super(RecursiveDefaultDict, self).__getitem__(key)

    def __setitem__(self, key, item):
        if isinstance(item, RecursiveDefaultDict):
            super(RecursiveDefaultDict, self).__setitem__(key, item)
        else:
            super(RecursiveDefaultDict, self).__setitem__(
                key, _convert_to_rdd(item))

    def update(self, element):
        super(RecursiveDefaultDict, self).update(_convert_to_rdd(element))
| bsd-2-clause |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_available_ssl_options_py3.py | 1 | 3168 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ApplicationGatewayAvailableSslOptions(Resource):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param predefined_policies: List of available Ssl predefined policy.
    :type predefined_policies:
     list[~azure.mgmt.network.v2018_01_01.models.SubResource]
    :param default_policy: Name of the Ssl predefined policy applied by
     default to application gateway. Possible values include:
     'AppGwSslPolicy20150501', 'AppGwSslPolicy20170401',
     'AppGwSslPolicy20170401S'
    :type default_policy: str or
     ~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslPolicyName
    :param available_cipher_suites: List of available Ssl cipher suites.
    :type available_cipher_suites: list[str or
     ~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslCipherSuite]
    :param available_protocols: List of available Ssl protocols.
    :type available_protocols: list[str or
     ~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslProtocol]
    """

    # 'name' and 'type' are server-populated and therefore read-only.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON paths/types for the
    # AutoRest serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
        'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
        'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
        'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
    }

    def __init__(self, *, id: str=None, location: str=None, tags=None, predefined_policies=None, default_policy=None, available_cipher_suites=None, available_protocols=None, **kwargs) -> None:
        super(ApplicationGatewayAvailableSslOptions, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.predefined_policies = predefined_policies
        self.default_policy = default_policy
        self.available_cipher_suites = available_cipher_suites
        self.available_protocols = available_protocols
| mit |
enriquesanchezb/practica_utad_2016 | venv/lib/python2.7/site-packages/pip/download.py | 143 | 31722 | from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
    """
    Return a string representing the user agent.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {"name": platform.python_implementation()},
    }

    impl = data["implementation"]
    if impl["name"] == 'CPython':
        impl["version"] = platform.python_version()
    elif impl["name"] == 'PyPy':
        pypy_info = sys.pypy_version_info
        if pypy_info.releaselevel == 'final':
            # Final releases drop the releaselevel/serial fields.
            pypy_info = pypy_info[:3]
        impl["version"] = ".".join(str(part) for part in pypy_info)
    elif impl["name"] == 'Jython':
        impl["version"] = platform.python_version()  # Complete Guess
    elif impl["name"] == 'IronPython':
        impl["version"] = platform.python_version()  # Complete Guess

    if sys.platform.startswith("linux"):
        # Keep only the truthy fields of the distro/libc tuples.
        distro = {key: val for key, val in zip(
            ["name", "version", "id"], platform.linux_distribution()) if val}
        libc = {key: val for key, val in zip(
            ["lib", "version"], platform.libc_ver()) if val}
        if libc:
            distro["libc"] = libc
        if distro:
            data["distro"] = distro

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()
    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()
    if platform.machine():
        data["cpu"] = platform.machine()

    # Python 2.6 doesn't have ssl.OPENSSL_VERSION.
    if HAS_TLS and sys.version_info[:2] > (2, 6):
        data["openssl_version"] = ssl.OPENSSL_VERSION

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
class MultiDomainBasicAuth(AuthBase):
    """Requests auth helper that remembers HTTP basic-auth credentials
    per netloc and can prompt the user when a request gets a 401."""

    def __init__(self, prompting=True):
        # prompting: ask interactively for credentials on a 401 response.
        self.prompting = prompting
        # Maps netloc (without embedded credentials) -> (username, password).
        self.passwords = {}

    def __call__(self, req):
        """Attach stored or URL-embedded credentials to the outgoing request."""
        parsed = urllib_parse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.rsplit("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        #   pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simple return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        #   request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        """Return (username, password) embedded in a "user:pass@host" netloc,
        or (None, None) when the netloc carries no credentials."""
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves file:// URLs straight off the local
    filesystem, synthesizing a minimal HTTP-like response."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # Missing/unreadable path maps to a 404; keep the error around
            # as the raw body for debugging.
            resp.status_code = 404
            resp.raw = exc
        else:
            resp.headers = CaseInsensitiveDict({
                "Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
                "Content-Length": stats.st_size,
                "Last-Modified": email.utils.formatdate(stats.st_mtime,
                                                        usegmt=True),
            })
            resp.raw = open(pathname, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self):
        pass
class SafeFileCache(FileCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.
    """

    def __init__(self, *args, **kwargs):
        super(SafeFileCache, self).__init__(*args, **kwargs)

        # If the cache directory (or its nearest existing ancestor) is not
        # owned by the user running pip, disable caching rather than fail
        # later with permission errors -- common after "sudo pip".
        if not check_path_owner(self.directory):
            logger.warning(
                "The directory '%s' or its parent directory is not owned by "
                "the current user and the cache has been disabled. Please "
                "check the permissions and owner of that directory. If "
                "executing pip with sudo, you may want sudo's -H flag.",
                self.directory,
            )

            # Setting the directory to None disables the cache.
            self.directory = None

    def _suppressed(self, method, *args, **kwargs):
        """Invoke *method*, treating a disabled cache or any cache I/O
        failure as a silent no-op returning None."""
        if self.directory is None:
            return None
        try:
            return method(*args, **kwargs)
        except (LockError, OSError, IOError):
            # Failing to read/write the cache is never fatal; behave as if
            # caching were simply disabled for this call.
            return None

    def get(self, *args, **kwargs):
        return self._suppressed(
            super(SafeFileCache, self).get, *args, **kwargs)

    def set(self, *args, **kwargs):
        return self._suppressed(
            super(SafeFileCache, self).set, *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._suppressed(
            super(SafeFileCache, self).delete, *args, **kwargs)
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables all TLS certificate verification."""

    def cert_verify(self, conn, url, verify, cert):
        # Force the connection to skip certificate checks regardless of
        # the ``verify``/``cert`` arguments supplied by the caller.
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
class PipSession(requests.Session):
    """requests.Session preconfigured for pip: retry policy, optional HTTP
    cache, basic-auth prompting, ``file://`` support and per-host TLS
    opt-outs."""

    # Default timeout applied to every request (see ``request`` below).
    timeout = None

    def __init__(self, *args, **kwargs):
        """Extra keyword args: ``retries`` (int), ``cache`` (cache directory
        path or None) and ``insecure_hosts`` (hosts whose TLS errors are
        ignored). Remaining args go to ``requests.Session``."""
        retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            status_forcelist=[503],

            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache, use_dir_lock=True),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # We want to use a non-validating adapter for any requests which are
        # deemed insecure.
        for host in insecure_hosts:
            self.mount("https://{0}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url: file path, ``file:`` URL, or http(s) URL to read.
    :param comes_from: path/URL of the requirements file that referenced
        *url*; used to reject local-file references from remote files.
    :param session: PipSession used for http(s) URLs; required.
    :raises TypeError: when *session* is omitted.
    :raises InstallationError: on unreadable files or disallowed references.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A requirements file fetched over http must not pull in local files.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            # Turn the file: URL into a plain path, handling legacy Windows
            # drive-letter URLs of the form ``file:///C|/path``.
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                # Collapse any run of leading slashes down to a single one.
                path = '/' + path.lstrip('/')
            # Fall through to the local-file read below with the plain path.
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text
    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
# Matches a leading URL scheme this module fetches itself (http/https/file).
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches legacy Windows drive-letter URL paths of the form ``/C|/path``.
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
    """Returns true if the name looks like a URL"""
    scheme, sep, _ = name.partition(':')
    if not sep:
        return False
    known_schemes = ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
    return scheme.lower() in known_schemes
def url_to_path(url):
    """
    Convert a file: URL to a path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)

    parts = urllib_parse.urlsplit(url)
    # A non-empty netloc is a UNC share; prepend the UNC notation.
    share = '\\\\' + parts.netloc if parts.netloc else parts.netloc
    return urllib_request.url2pathname(share + parts.path)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and have
    quoted path parts.
    """
    absolute = os.path.abspath(path)
    normalized = os.path.normpath(absolute)
    return urllib_parse.urljoin('file:', urllib_request.pathname2url(normalized))
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    # Membership test already yields a bool; no need for an if/return pair.
    return splitext(name)[1].lower() in ARCHIVE_EXTENSIONS
def unpack_vcs_link(link, location):
    """Check out the VCS repository behind *link* into *location*."""
    vcs_backend = _get_used_vcs_backend(link)
    vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
    """Return an instantiated VCS backend that handles *link*'s scheme,
    or None when no registered backend matches."""
    for backend in vcs.backends:
        if link.scheme in backend.schemes:
            return backend(link.url)
    return None
def is_vcs_url(link):
    """True when some registered VCS backend claims *link*'s scheme."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
def is_file_url(link):
    """True when the link's URL uses the ``file:`` scheme."""
    # Compare the scheme prefix case-insensitively.
    return link.url[:5].lower() == 'file:'
def is_dir_url(link):
    """Return whether a file:// Link points to a directory.

    ``link`` must not have any other scheme but file://. Call is_file_url()
    first.
    """
    return os.path.isdir(url_to_path(link.url_without_fragment))
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file, hashes):
    """Stream *resp*'s body into *content_file*, rendering progress when
    appropriate and verifying *hashes* (if any) over the raw bytes.

    :raises HashMismatch: when *hashes* is non-empty and no digest matches.
    """
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed header: treat the size as unknown.
        total_length = 0

    cached_resp = getattr(resp, "from_cache", False)

    # Decide whether to render progress: never under quiet logging or for
    # cached responses; otherwise for large (>40kB) or unknown sizes.
    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    show_url = link.show_url

    def resp_read(chunk_size):
        # Yield the body in chunks of raw (undecoded) bytes.
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we don't
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    def written_chunks(chunks):
        # Tee: persist each chunk to disk, then pass it along for hashing.
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk

    progress_indicator = _progress_indicator

    # Show the short PyPI URL form when downloading from PyPI itself.
    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment

    if show_progress:  # We don't show progress on cached responses
        if total_length:
            logger.info("Downloading %s (%s)", url, format_size(total_length))
            progress_indicator = DownloadProgressBar(max=total_length).iter
        else:
            logger.info("Downloading %s", url)
            progress_indicator = DownloadProgressSpinner().iter
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)

    logger.debug('Downloading from URL %s', link)

    downloaded_chunks = written_chunks(
        progress_indicator(
            resp_read(CONTENT_CHUNK_SIZE),
            CONTENT_CHUNK_SIZE
        )
    )
    # The pipeline above is lazy: iterating it here is what actually
    # performs the download and the writes.
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        consume(downloaded_chunks)
def _copy_file(filename, location, link):
    """Copy *filename* into *location* under the link's filename, asking
    the user how to proceed when the destination already exists."""
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            # Keep the existing file; skip the copy entirely.
            copy = False
        elif response == 'w':
            logger.warning('Deleting %s', display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            # Move the existing file aside before copying the new one in.
            dest_file = backup_dir(download_location)
            logger.warning(
                'Backing up %s to %s',
                display_path(download_location),
                display_path(dest_file),
            )
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
                    session=None, hashes=None):
    """Download *link* (re-using a file already in ``download_dir`` when its
    hash checks out) and unpack the archive into *location*.

    :param hashes: optional Hashes object the file must match.
    :raises TypeError: when no *session* is supplied.
    """
    if session is None:
        raise TypeError(
            "unpack_http_url() missing 1 required keyword argument: 'session'"
        )

    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    try:
        # If a download dir is specified, is the file already downloaded there?
        already_downloaded_path = None
        if download_dir:
            already_downloaded_path = _check_download_dir(link,
                                                          download_dir,
                                                          hashes)

        if already_downloaded_path:
            from_path = already_downloaded_path
            content_type = mimetypes.guess_type(from_path)[0]
        else:
            # let's download to a tmp dir
            from_path, content_type = _download_http_url(link,
                                                         session,
                                                         temp_dir,
                                                         hashes)

        # unpack the archive to the build dir location. even when only
        # downloading archives, they have to be unpacked to parse dependencies
        unpack_file(from_path, location, content_type, link)

        # a download dir is specified; let's copy the archive there
        if download_dir and not already_downloaded_path:
            _copy_file(from_path, download_dir, link)
    finally:
        # Previously the temp dir was only removed on success, leaking it
        # whenever the download or unpack raised; always clean it up.
        rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
    """Unpack link into location.

    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir.

    :raises HashMismatch: when *hashes* is non-empty and the file does not
        match (directories are never hash-checked).
    """
    link_path = url_to_path(link.url_without_fragment)

    # If it's a url to a local directory
    if is_dir_url(link):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(link_path)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link,
                                                      download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
    """Copy distribution files in `link_path` to `location`.

    Invoked when user requests to install a local directory. E.g.:

        pip install .
        pip install ~/dev/git-repos/python-prompt-toolkit

    """
    # Note: This is currently VERY SLOW if you have a lot of data in the
    # directory, because it copies everything with `shutil.copytree`.
    # What it should really do is build an sdist and install that.
    # See https://github.com/pypa/pip/issues/2195

    if os.path.isdir(location):
        rmtree(location)

    # build an sdist
    setup_py = 'setup.py'
    sdist_args = [sys.executable]
    sdist_args.append('-c')
    # SETUPTOOLS_SHIM injects a setuptools import before running setup.py.
    sdist_args.append(SETUPTOOLS_SHIM % setup_py)
    sdist_args.append('sdist')
    sdist_args += ['--dist-dir', location]
    logger.info('Running setup.py sdist for %s', link_path)

    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)

    # unpack sdist into `location`
    # NOTE(review): assumes the dist dir now contains exactly the one sdist
    # just built -- os.listdir()[0] would pick arbitrarily otherwise.
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        index_parts = urllib_parse.urlparse(index_url)
        # Remember the index's scheme so per-request URLs can be rebuilt
        # against whichever host/handler the transport is asked for.
        self._scheme = index_parts.scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        """POST the XML-RPC *request_body* through the PipSession and parse
        the streamed response; HTTP errors are logged and re-raised."""
        parts = (self._scheme, host, handler, None, None, None)
        url = urllib_parse.urlunparse(parts)
        try:
            headers = {'Content-Type': 'text/xml'}
            response = self._session.post(url, data=request_body,
                                          headers=headers, stream=True)
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
def unpack_url(link, location, download_dir=None,
               only_download=False, session=None, hashes=None):
    """Unpack link.
       If link is a VCS link:
         if only_download, export into download_dir and ignore location
          else unpack into location
       for other types of link:
         - unpack into location
         - if download_dir, copy the file into download_dir
         - if only_download, mark location for deletion

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # non-editable vcs urls
    if is_vcs_url(link):
        unpack_vcs_link(link, location)

    # file urls
    elif is_file_url(link):
        unpack_file_url(link, location, download_dir, hashes=hashes)

    # http urls
    else:
        if session is None:
            # Fall back to a default session when the caller supplied none.
            session = PipSession()

        unpack_http_url(
            link,
            location,
            download_dir,
            session,
            hashes=hashes
        )
    if only_download:
        write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir, hashes):
    """Download link url into temp_dir using provided session.

    Returns ``(file_path, content_type)`` for the downloaded file, choosing
    a filename from the link, the Content-Disposition header, or the
    response's content type / final URL.

    :raises requests.HTTPError: if the server responds with an error status.
    :raises HashMismatch: if *hashes* is non-empty and the body mismatches.
    """
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding We're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s", exc.response.status_code, link,
        )
        raise

    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        # cgi.parse_header returns (value, params); the value itself is
        # unused here, and binding it would shadow the builtin ``type``.
        params = cgi.parse_header(content_disposition)[1]
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        # No usable extension: derive one from the content type...
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # ...or, failing that, from the URL we were redirected to.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file, hashes)
    return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
    """ Check download_dir for previously downloaded file with correct hash
        If a correct file is found return its path else None
    """
    download_path = os.path.join(download_dir, link.filename)
    if os.path.exists(download_path):
        # If already downloaded, does its hash match?
        logger.info('File was already downloaded %s', download_path)
        if hashes:
            try:
                hashes.check_against_path(download_path)
            except HashMismatch:
                # Stale or corrupted file: remove it so the caller
                # re-downloads a fresh copy.
                logger.warning(
                    'Previously-downloaded file %s has bad hash. '
                    'Re-downloading.',
                    download_path
                )
                os.unlink(download_path)
                return None
        return download_path
    return None
| apache-2.0 |
ning/collector | src/utils/py/scribe/scribe.py | 35 | 6440 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
import fb303.FacebookService
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
# NOTE: Thrift-generated code (see file header); comments only, no logic edits.
class Iface(fb303.FacebookService.Iface):
    """Generated service interface for the scribe Log service."""

    def Log(self, messages):
        """
        Parameters:
         - messages: list of LogEntry structs to record
        """
        pass
class Client(fb303.FacebookService.Client, Iface):
    """Generated synchronous Thrift client for the scribe Log service."""

    def __init__(self, iprot, oprot=None):
        # iprot/oprot: input/output protocols; one protocol may serve both
        # directions when oprot is omitted.
        fb303.FacebookService.Client.__init__(self, iprot, oprot)

    def Log(self, messages):
        """
        Parameters:
         - messages: list of LogEntry structs to send

        Returns the server's i32 result code.
        """
        self.send_Log(messages)
        return self.recv_Log()

    def send_Log(self, messages):
        # Serialize and flush one Log call frame (name, args, seqid).
        self._oprot.writeMessageBegin('Log', TMessageType.CALL, self._seqid)
        args = Log_args()
        args.messages = messages
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_Log(self, ):
        # Read the reply frame; server-side exceptions are re-raised here.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = Log_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success != None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "Log failed: unknown result");
class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
    """Generated server-side dispatcher for the scribe Log service."""

    def __init__(self, handler):
        # *handler* implements Iface; Log calls are routed to it via the
        # process map populated below.
        fb303.FacebookService.Processor.__init__(self, handler)
        self._processMap["Log"] = Processor.process_Log

    def process(self, iprot, oprot):
        # Read one incoming message and dispatch it by method name.
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the payload and reply with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_Log(self, seqid, iprot, oprot):
        # Decode Log args, invoke the handler, and serialize the reply.
        args = Log_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = Log_result()
        result.success = self._handler.Log(args.messages)
        oprot.writeMessageBegin("Log", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class Log_args:
    """Generated argument struct for Log().

    Attributes:
     - messages: list of LogEntry structs
    """

    # (field-id, type, name, type-args, default) per Thrift field; index 0
    # is unused because Thrift field ids start at 1.
    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'messages', (TType.STRUCT, (LogEntry, LogEntry.thrift_spec)), None, ),  # 1
    )

    def __init__(self, messages=None,):
        self.messages = messages

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the protocol supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decoding, skipping unknown ids.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.messages = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in xrange(_size0):
                        _elem5 = LogEntry()
                        _elem5.read(iprot)
                        self.messages.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Log_args')
        if self.messages != None:
            oprot.writeFieldBegin('messages', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.messages))
            for iter6 in self.messages:
                iter6.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality over all attributes.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Log_result:
    """Generated result struct for Log().

    Attributes:
     - success: the i32 result code returned by the server
    """

    # Thrift field spec; field id 0 is the conventional return slot.
    thrift_spec = (
        (0, TType.I32, 'success', None, None, ),  # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the protocol supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decoding, skipping unknown ids.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Log_result')
        if self.success != None:
            oprot.writeFieldBegin('success', TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality over all attributes.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| apache-2.0 |
HwisooSo/gemV-update | src/mem/ExternalSlave.py | 47 | 2722 | # Copyright (c) 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
from m5.params import *
from MemObject import MemObject
class ExternalSlave(MemObject):
    """gem5 SimObject exposing a slave port to an external (non-gem5)
    agent, chosen by name from the registered external port handlers."""
    type = 'ExternalSlave'
    cxx_header = "mem/external_slave.hh"

    port = SlavePort("Slave port")
    addr_ranges = VectorParam.AddrRange([], 'Addresses served by'
        ' this port\'s external agent')
    port_type = Param.String('stub', 'Registered external port handler'
        ' to pass this port to in instantiation')
    port_data = Param.String('stub', 'A string to pass to the port'
        ' handler (in a format specific to the handler) to describe how'
        ' the port should be bound/bindable/discoverable')
| bsd-3-clause |
sorenk/ansible | test/units/modules/remote_management/oneview/test_oneview_ethernet_network.py | 78 | 14524 | # -*- coding: utf-8 -*-
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml
from ansible.compat.tests import unittest, mock
from oneview_module_loader import EthernetNetworkModule, OneViewModuleResourceNotFound
from hpe_test_utils import OneViewBaseTestCase
FAKE_MSG_ERROR = 'Fake message error'
DEFAULT_ETHERNET_NAME = 'Test Ethernet Network'
RENAMED_ETHERNET = 'Renamed Ethernet Network'
DEFAULT_ENET_TEMPLATE = dict(
name=DEFAULT_ETHERNET_NAME,
vlanId=200,
ethernetNetworkType="Tagged",
purpose="General",
smartLink=False,
privateNetwork=False,
connectionTemplateUri=None
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_ETHERNET_NAME)
)
PARAMS_TO_RENAME = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_ETHERNET_NAME,
newName=RENAMED_ETHERNET)
)
YAML_PARAMS_WITH_CHANGES = """
config: "config.json"
state: present
data:
name: 'Test Ethernet Network'
purpose: Management
connectionTemplateUri: ~
bandwidth:
maximumBandwidth: 3000
typicalBandwidth: 2000
"""
YAML_RESET_CONNECTION_TEMPLATE = """
config: "{{ config }}"
state: default_bandwidth_reset
data:
name: 'network name'
"""
PARAMS_FOR_SCOPES_SET = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_ETHERNET_NAME)
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_ETHERNET_NAME)
)
PARAMS_FOR_BULK_CREATED = dict(
config='config.json',
state='present',
data=dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10")
)
DEFAULT_BULK_ENET_TEMPLATE = [
{'name': 'TestNetwork_1', 'vlanId': 1},
{'name': 'TestNetwork_2', 'vlanId': 2},
{'name': 'TestNetwork_5', 'vlanId': 5},
{'name': 'TestNetwork_9', 'vlanId': 9},
{'name': 'TestNetwork_10', 'vlanId': 10},
]
DICT_PARAMS_WITH_CHANGES = yaml.load(YAML_PARAMS_WITH_CHANGES)["data"]
class EthernetNetworkModuleSpec(unittest.TestCase,
OneViewBaseTestCase):
"""
OneViewBaseTestCase provides the mocks used in this test case
"""
    def setUp(self):
        # Wire up the shared OneView client/module mocks and keep a handle
        # on the ethernet-networks resource mock used by every test.
        self.configure_mocks(self, EthernetNetworkModule)
        self.resource = self.mock_ov_client.ethernet_networks
    def test_should_create_new_ethernet_network(self):
        # No existing network -> the module creates one and reports changed.
        self.resource.get_by.return_value = []
        self.resource.create.return_value = DEFAULT_ENET_TEMPLATE

        self.mock_ansible_module.params = PARAMS_FOR_PRESENT

        EthernetNetworkModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_CREATED,
            ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
        )
    def test_should_not_update_when_data_is_equals(self):
        # Existing network already matches the request -> no change reported.
        self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]

        self.mock_ansible_module.params = PARAMS_FOR_PRESENT

        EthernetNetworkModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=EthernetNetworkModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
        )
    def test_update_when_data_has_modified_attributes(self):
        # Request differs from the stored network -> module updates it.
        data_merged = DEFAULT_ENET_TEMPLATE.copy()
        data_merged['purpose'] = 'Management'

        self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
        self.resource.update.return_value = data_merged
        self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}

        self.mock_ansible_module.params = yaml.load(YAML_PARAMS_WITH_CHANGES)

        EthernetNetworkModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_UPDATED,
            ansible_facts=dict(ethernet_network=data_merged)
        )
    def test_update_when_only_bandwidth_has_modified_attributes(self):
        # Only the nested bandwidth block differs (held on the connection
        # template) -> still counts as an update.
        self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
        self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}

        self.mock_ansible_module.params = yaml.load(YAML_PARAMS_WITH_CHANGES)

        EthernetNetworkModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_UPDATED,
            ansible_facts=dict(ethernet_network=DICT_PARAMS_WITH_CHANGES)
        )
def test_update_when_data_has_modified_attributes_but_bandwidth_is_equal(self):
data_merged = DEFAULT_ENET_TEMPLATE.copy()
data_merged['purpose'] = 'Management'
self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
self.resource.update.return_value = data_merged
self.mock_ov_client.connection_templates.get.return_value = {
"bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
self.mock_ansible_module.params = yaml.load(YAML_PARAMS_WITH_CHANGES)
EthernetNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=EthernetNetworkModule.MSG_UPDATED,
ansible_facts=dict(ethernet_network=data_merged)
)
def test_update_successfully_even_when_connection_template_uri_not_exists(self):
data_merged = DEFAULT_ENET_TEMPLATE.copy()
del data_merged['connectionTemplateUri']
self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = yaml.load(YAML_PARAMS_WITH_CHANGES)
EthernetNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=EthernetNetworkModule.MSG_UPDATED,
ansible_facts=dict(ethernet_network=data_merged)
)
    def test_rename_when_resource_exists(self):
        # Renaming an existing network goes through resource.update with
        # the merged (renamed) data.
        data_merged = DEFAULT_ENET_TEMPLATE.copy()
        data_merged['name'] = RENAMED_ETHERNET
        params_to_rename = PARAMS_TO_RENAME.copy()
        self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
        self.resource.update.return_value = data_merged
        self.mock_ansible_module.params = params_to_rename
        EthernetNetworkModule().run()
        self.resource.update.assert_called_once_with(data_merged)
    def test_create_with_new_name_when_resource_not_exists(self):
        # When no network matches, the module falls back to creating one
        # from the supplied data instead of renaming.
        data_merged = DEFAULT_ENET_TEMPLATE.copy()
        data_merged['name'] = RENAMED_ETHERNET
        params_to_rename = PARAMS_TO_RENAME.copy()
        self.resource.get_by.return_value = []
        self.resource.create.return_value = DEFAULT_ENET_TEMPLATE
        self.mock_ansible_module.params = params_to_rename
        EthernetNetworkModule().run()
        self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
    def test_should_remove_ethernet_network(self):
        # state=absent with an existing network -> deleted, changed=True.
        self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        EthernetNetworkModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_DELETED
        )
    def test_should_do_nothing_when_ethernet_network_not_exist(self):
        # state=absent with no matching network -> no change reported.
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        EthernetNetworkModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=EthernetNetworkModule.MSG_ALREADY_ABSENT
        )
    def test_should_create_all_ethernet_networks(self):
        # No networks in the VLAN range yet -> bulk-create the whole range.
        self.resource.get_range.side_effect = [[], DEFAULT_BULK_ENET_TEMPLATE]
        self.resource.create_bulk.return_value = DEFAULT_BULK_ENET_TEMPLATE
        self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
        EthernetNetworkModule().run()
        self.resource.create_bulk.assert_called_once_with(
            dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10"))
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_BULK_CREATED,
            ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
    def test_should_create_missing_ethernet_networks(self):
        # VLANs 1-2 already exist -> only the missing 5, 9, 10 are created.
        enet_get_range_return = [
            {'name': 'TestNetwork_1', 'vlanId': 1},
            {'name': 'TestNetwork_2', 'vlanId': 2},
        ]
        self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
        self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
        self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
        EthernetNetworkModule().run()
        self.resource.create_bulk.assert_called_once_with(
            dict(namePrefix="TestNetwork", vlanIdRange="5,9,10"))
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True, msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
            ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
    def test_should_create_missing_ethernet_networks_with_just_one_difference(self):
        # A single missing VLAN is expressed as the degenerate range '5-5'.
        enet_get_range_return = [
            {'name': 'TestNetwork_1', 'vlanId': 1},
            {'name': 'TestNetwork_2', 'vlanId': 2},
        ]
        self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
        self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5]
        self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
        EthernetNetworkModule().run()
        self.resource.create_bulk.assert_called_once_with({'vlanIdRange': '5-5', 'namePrefix': 'TestNetwork'})
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
            ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
    def test_should_do_nothing_when_ethernet_networks_already_exist(self):
        # Every VLAN in the requested range exists -> changed=False.
        self.resource.get_range.return_value = DEFAULT_BULK_ENET_TEMPLATE
        self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
        self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
        EthernetNetworkModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False, msg=EthernetNetworkModule.MSG_BULK_ALREADY_EXIST,
            ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
def test_reset_successfully(self):
self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
self.mock_ov_client.connection_templates.update.return_value = {'result': 'success'}
self.mock_ov_client.connection_templates.get.return_value = {
"bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
self.mock_ov_client.connection_templates.get_default.return_value = {"bandwidth": {
"max": 1
}}
self.mock_ansible_module.params = yaml.load(YAML_RESET_CONNECTION_TEMPLATE)
EthernetNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True, msg=EthernetNetworkModule.MSG_CONNECTION_TEMPLATE_RESET,
ansible_facts=dict(ethernet_network_connection_template={'result': 'success'}))
def test_should_fail_when_reset_not_existing_ethernet_network(self):
self.resource.get_by.return_value = [None]
self.mock_ansible_module.params = yaml.load(YAML_RESET_CONNECTION_TEMPLATE)
EthernetNetworkModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(
exception=mock.ANY,
msg=EthernetNetworkModule.MSG_ETHERNET_NETWORK_NOT_FOUND
)
    def test_update_scopes_when_different(self):
        # Differing scopeUris are reconciled with a PATCH 'replace' on
        # /scopeUris rather than a full update.
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data']['scopeUris'] = ['test']
        self.mock_ansible_module.params = params_to_scope
        resource_data = DEFAULT_ENET_TEMPLATE.copy()
        resource_data['scopeUris'] = ['fake']
        resource_data['uri'] = 'rest/ethernet/fake'
        self.resource.get_by.return_value = [resource_data]
        patch_return = resource_data.copy()
        patch_return['scopeUris'] = ['test']
        self.resource.patch.return_value = patch_return
        EthernetNetworkModule().run()
        self.resource.patch.assert_called_once_with('rest/ethernet/fake',
                                                    operation='replace',
                                                    path='/scopeUris',
                                                    value=['test'])
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            ansible_facts=dict(ethernet_network=patch_return),
            msg=EthernetNetworkModule.MSG_UPDATED
        )
def test_should_do_nothing_when_scopes_are_the_same(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = DEFAULT_ENET_TEMPLATE.copy()
resource_data['scopeUris'] = ['test']
self.resource.get_by.return_value = [resource_data]
EthernetNetworkModule().run()
self.resource.patch.not_been_called()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(ethernet_network=resource_data),
msg=EthernetNetworkModule.MSG_ALREADY_PRESENT
)
# Allow this spec to be run directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
legalsylvain/OpenUpgrade | addons/resource/__openerp__.py | 114 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the "resource" dependency module.
# Fix: the dict previously declared 'author' and 'website' twice; the
# later (identical) occurrences silently overwrote the earlier ones, so
# the duplicates are removed.
{
    'name' : 'Resource',
    'version' : '1.1',
    'author' : 'OpenERP SA',
    'category' : 'Hidden/Dependency',
    'website' : 'http://www.openerp.com',
    'description': """
Module for resource management.
===============================
A resource represent something that can be scheduled (a developer on a task or a
work center on manufacturing orders). This module manages a resource calendar
associated to every resource. It also manages the leaves of every resource.
""",
    'depends': ['base'],
    'data': [
        'security/ir.model.access.csv',
        'security/resource_security.xml',
        'resource_view.xml',
    ],
    'demo': ['resource_demo.xml'],
    'test': [
        'test/resource.yml',
        'test/duplicate_resource.yml',
    ],
    'installable': True,
    'auto_install': False,
    'images': ['images/resource_leaves_calendar.jpeg','images/resource_leaves_form.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/test/test_math.py | 55 | 38805 | # Python test set -- math module
# XXXX Should not do tests around zero only
from test.test_support import run_unittest, verbose
import unittest
import math
import os
import sys
import random
eps = 1E-05  # absolute tolerance used by MathTests.ftest comparisons
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')
# Locate the file with test values: next to the invoked script when run
# directly, next to this module otherwise.
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
test_dir = os.path.dirname(file) or os.curdir
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def parse_testfile(fname):
    """Iterate over the test-value file *fname*.

    Lines that are blank or begin with ``--`` are skipped.  Every other
    line has the shape ``id fn arg_real arg_imag -> exp_real exp_imag
    [flags...]``; for each one this yields the tuple
    ``(id, fn, arg_real, arg_imag, exp_real, exp_imag, flags)`` with the
    four numeric fields converted to float and *flags* as a (possibly
    empty) list of strings.
    """
    with open(fname) as fp:
        for raw in fp:
            # skip comment lines and blank lines
            if raw.startswith('--') or not raw.strip():
                continue
            lhs, rhs = raw.split('->')
            case_id, fn, arg_re, arg_im = lhs.split()
            expected = rhs.split()
            yield (case_id, fn,
                   float(arg_re), float(arg_im),
                   float(expected[0]), float(expected[1]),
                   expected[2:])
class MathTests(unittest.TestCase):
def ftest(self, name, value, expected):
if abs(value-expected) > eps:
# Use %r instead of %f so the error message
# displays full precision. Otherwise discrepancies
# in the last few bits will lead to very confusing
# error messages
self.fail('%s returned %r, expected %r' %
(name, value, expected))
    def testConstants(self):
        # pi and e to the precision checked by ftest (1e-5).
        self.ftest('pi', math.pi, 3.1415926)
        self.ftest('e', math.e, 2.7182818)
    def testAcos(self):
        # Domain is [-1, 1]; infinities raise ValueError, NaN propagates.
        self.assertRaises(TypeError, math.acos)
        self.ftest('acos(-1)', math.acos(-1), math.pi)
        self.ftest('acos(0)', math.acos(0), math.pi/2)
        self.ftest('acos(1)', math.acos(1), 0)
        self.assertRaises(ValueError, math.acos, INF)
        self.assertRaises(ValueError, math.acos, NINF)
        self.assert_(math.isnan(math.acos(NAN)))
    def testAcosh(self):
        # Domain is [1, inf); acosh(+inf) is +inf, NaN propagates.
        self.assertRaises(TypeError, math.acosh)
        self.ftest('acosh(1)', math.acosh(1), 0)
        self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
        self.assertRaises(ValueError, math.acosh, 0)
        self.assertRaises(ValueError, math.acosh, -1)
        self.assertEquals(math.acosh(INF), INF)
        self.assertRaises(ValueError, math.acosh, NINF)
        self.assert_(math.isnan(math.acosh(NAN)))
    def testAsin(self):
        # Domain is [-1, 1]; infinities raise ValueError, NaN propagates.
        self.assertRaises(TypeError, math.asin)
        self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
        self.ftest('asin(0)', math.asin(0), 0)
        self.ftest('asin(1)', math.asin(1), math.pi/2)
        self.assertRaises(ValueError, math.asin, INF)
        self.assertRaises(ValueError, math.asin, NINF)
        self.assert_(math.isnan(math.asin(NAN)))
    def testAsinh(self):
        # Odd function defined on the whole real line; preserves infinities.
        self.assertRaises(TypeError, math.asinh)
        self.ftest('asinh(0)', math.asinh(0), 0)
        self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
        self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
        self.assertEquals(math.asinh(INF), INF)
        self.assertEquals(math.asinh(NINF), NINF)
        self.assert_(math.isnan(math.asinh(NAN)))
    def testAtan(self):
        # atan saturates at +/-pi/2 for infinite arguments.
        self.assertRaises(TypeError, math.atan)
        self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
        self.ftest('atan(0)', math.atan(0), 0)
        self.ftest('atan(1)', math.atan(1), math.pi/4)
        self.ftest('atan(inf)', math.atan(INF), math.pi/2)
        self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
        self.assert_(math.isnan(math.atan(NAN)))
def testAtanh(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assert_(math.isnan(math.atanh(NAN)))
    def testAtan2(self):
        # Quadrant checks followed by the IEEE 754 special-value table
        # for atan2: signed zeros, infinities and NaN in either argument.
        self.assertRaises(TypeError, math.atan2)
        self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
        self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
        self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
        self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
        self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
        # math.atan2(0, x)
        self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
        self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
        self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
        self.assertEqual(math.atan2(0., 0.), 0.)
        self.assertEqual(math.atan2(0., 2.3), 0.)
        self.assertEqual(math.atan2(0., INF), 0.)
        self.assert_(math.isnan(math.atan2(0., NAN)))
        # math.atan2(-0, x)
        self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
        self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
        self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
        self.assertEqual(math.atan2(-0., 0.), -0.)
        self.assertEqual(math.atan2(-0., 2.3), -0.)
        self.assertEqual(math.atan2(-0., INF), -0.)
        self.assert_(math.isnan(math.atan2(-0., NAN)))
        # math.atan2(INF, x)
        self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
        self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
        self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
        self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
        self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
        self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
        self.assert_(math.isnan(math.atan2(INF, NAN)))
        # math.atan2(NINF, x)
        self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
        self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
        self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
        self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
        self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
        self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
        self.assert_(math.isnan(math.atan2(NINF, NAN)))
        # math.atan2(+finite, x)
        self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
        self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
        self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
        self.assertEqual(math.atan2(2.3, INF), 0.)
        self.assert_(math.isnan(math.atan2(2.3, NAN)))
        # math.atan2(-finite, x)
        self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
        self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
        self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
        self.assertEqual(math.atan2(-2.3, INF), -0.)
        self.assert_(math.isnan(math.atan2(-2.3, NAN)))
        # math.atan2(NAN, x): NaN in the first argument always propagates
        self.assert_(math.isnan(math.atan2(NAN, NINF)))
        self.assert_(math.isnan(math.atan2(NAN, -2.3)))
        self.assert_(math.isnan(math.atan2(NAN, -0.)))
        self.assert_(math.isnan(math.atan2(NAN, 0.)))
        self.assert_(math.isnan(math.atan2(NAN, 2.3)))
        self.assert_(math.isnan(math.atan2(NAN, INF)))
        self.assert_(math.isnan(math.atan2(NAN, NAN)))
    def testCeil(self):
        # ceil rounds toward +infinity; on Python 2 the result type is
        # float, and objects are converted via __float__ (a __ceil__
        # attribute is not honoured here and must raise TypeError).
        self.assertRaises(TypeError, math.ceil)
        # These types will be int in py3k.
        self.assertEquals(float, type(math.ceil(1)))
        self.assertEquals(float, type(math.ceil(1L)))
        self.assertEquals(float, type(math.ceil(1.0)))
        self.ftest('ceil(0.5)', math.ceil(0.5), 1)
        self.ftest('ceil(1.0)', math.ceil(1.0), 1)
        self.ftest('ceil(1.5)', math.ceil(1.5), 2)
        self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
        self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
        self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
        self.assertEquals(math.ceil(INF), INF)
        self.assertEquals(math.ceil(NINF), NINF)
        self.assert_(math.isnan(math.ceil(NAN)))
        class TestCeil(object):
            def __float__(self):
                return 41.3
        class TestNoCeil(object):
            pass
        self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
        self.assertRaises(TypeError, math.ceil, TestNoCeil())
        t = TestNoCeil()
        t.__ceil__ = lambda *args: args
        self.assertRaises(TypeError, math.ceil, t)
        self.assertRaises(TypeError, math.ceil, t, 0)
if float.__getformat__("double").startswith("IEEE"):
def testCopysign(self):
self.assertRaises(TypeError, math.copysign)
# copysign should let us distinguish signs of zeros
self.assertEquals(copysign(1., 0.), 1.)
self.assertEquals(copysign(1., -0.), -1.)
self.assertEquals(copysign(INF, 0.), INF)
self.assertEquals(copysign(INF, -0.), NINF)
self.assertEquals(copysign(NINF, 0.), INF)
self.assertEquals(copysign(NINF, -0.), NINF)
# and of infinities
self.assertEquals(copysign(1., INF), 1.)
self.assertEquals(copysign(1., NINF), -1.)
self.assertEquals(copysign(INF, INF), INF)
self.assertEquals(copysign(INF, NINF), NINF)
self.assertEquals(copysign(NINF, INF), INF)
self.assertEquals(copysign(NINF, NINF), NINF)
self.assert_(math.isnan(copysign(NAN, 1.)))
self.assert_(math.isnan(copysign(NAN, INF)))
self.assert_(math.isnan(copysign(NAN, NINF)))
self.assert_(math.isnan(copysign(NAN, NAN)))
# copysign(INF, NAN) may be INF or it may be NINF, since
# we don't know whether the sign bit of NAN is set on any
# given platform.
self.assert_(math.isinf(copysign(INF, NAN)))
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEquals(abs(copysign(2., NAN)), 2.)
    def testCos(self):
        # cos at quadrant points; cos(+/-inf) is either NaN or ValueError
        # depending on the platform libm, so both outcomes are accepted.
        self.assertRaises(TypeError, math.cos)
        self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0)
        self.ftest('cos(0)', math.cos(0), 1)
        self.ftest('cos(pi/2)', math.cos(math.pi/2), 0)
        self.ftest('cos(pi)', math.cos(math.pi), -1)
        try:
            self.assert_(math.isnan(math.cos(INF)))
            self.assert_(math.isnan(math.cos(NINF)))
        except ValueError:
            self.assertRaises(ValueError, math.cos, INF)
            self.assertRaises(ValueError, math.cos, NINF)
        self.assert_(math.isnan(math.cos(NAN)))
    def testCosh(self):
        # cosh(0) == 1 and the identity cosh(2x) == 2*cosh(x)**2 - 1.
        self.assertRaises(TypeError, math.cosh)
        self.ftest('cosh(0)', math.cosh(0), 1)
        self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
        self.assertEquals(math.cosh(INF), INF)
        self.assertEquals(math.cosh(NINF), INF)
        self.assert_(math.isnan(math.cosh(NAN)))
    def testDegrees(self):
        # radians -> degrees at a few reference angles.
        self.assertRaises(TypeError, math.degrees)
        self.ftest('degrees(pi)', math.degrees(math.pi), 180.0)
        self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
        self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
    def testExp(self):
        # exp at -1/0/1 plus the exp(+/-inf) limits.
        self.assertRaises(TypeError, math.exp)
        self.ftest('exp(-1)', math.exp(-1), 1/math.e)
        self.ftest('exp(0)', math.exp(0), 1)
        self.ftest('exp(1)', math.exp(1), math.e)
        self.assertEquals(math.exp(INF), INF)
        self.assertEquals(math.exp(NINF), 0.)
        self.assert_(math.isnan(math.exp(NAN)))
    def testFabs(self):
        self.assertRaises(TypeError, math.fabs)
        self.ftest('fabs(-1)', math.fabs(-1), 1)
        self.ftest('fabs(0)', math.fabs(0), 0)
        self.ftest('fabs(1)', math.fabs(1), 1)
    def testFactorial(self):
        # Compare math.factorial against a naive product for int, long
        # and float inputs; negatives and non-integral floats raise.
        def fact(n):
            result = 1
            for i in range(1, int(n)+1):
                result *= i
            return result
        # NOTE(review): 'values' is built and shuffled but the loop below
        # iterates range(10), so 50/100/500 are never exercised -- confirm
        # whether the loop was meant to read 'for x in values'.
        values = range(10) + [50, 100, 500]
        random.shuffle(values)
        for x in range(10):
            for cast in (int, long, float):
                self.assertEqual(math.factorial(cast(x)), fact(x), (x, fact(x), math.factorial(x)))
        self.assertRaises(ValueError, math.factorial, -1)
        self.assertRaises(ValueError, math.factorial, math.pi)
def testFloor(self):
self.assertRaises(TypeError, math.floor)
# These types will be int in py3k.
self.assertEquals(float, type(math.floor(1)))
self.assertEquals(float, type(math.floor(1L)))
self.assertEquals(float, type(math.floor(1.0)))
self.ftest('floor(0.5)', math.floor(0.5), 0)
self.ftest('floor(1.0)', math.floor(1.0), 1)
self.ftest('floor(1.5)', math.floor(1.5), 1)
self.ftest('floor(-0.5)', math.floor(-0.5), -1)
self.ftest('floor(-1.0)', math.floor(-1.0), -1)
self.ftest('floor(-1.5)', math.floor(-1.5), -2)
# pow() relies on floor() to check for integers
# This fails on some platforms - so check it here
self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
self.assertEquals(math.ceil(INF), INF)
self.assertEquals(math.ceil(NINF), NINF)
self.assert_(math.isnan(math.floor(NAN)))
class TestFloor(object):
def __float__(self):
return 42.3
class TestNoFloor(object):
pass
self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
self.assertRaises(TypeError, math.floor, TestNoFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
    def testFmod(self):
        # fmod's sign follows the dividend; NaNs propagate; a zero divisor
        # or infinite dividend raises ValueError; finite fmod inf == finite.
        self.assertRaises(TypeError, math.fmod)
        self.ftest('fmod(10,1)', math.fmod(10,1), 0)
        self.ftest('fmod(10,0.5)', math.fmod(10,0.5), 0)
        self.ftest('fmod(10,1.5)', math.fmod(10,1.5), 1)
        self.ftest('fmod(-10,1)', math.fmod(-10,1), 0)
        self.ftest('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
        self.ftest('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
        self.assert_(math.isnan(math.fmod(NAN, 1.)))
        self.assert_(math.isnan(math.fmod(1., NAN)))
        self.assert_(math.isnan(math.fmod(NAN, NAN)))
        self.assertRaises(ValueError, math.fmod, 1., 0.)
        self.assertRaises(ValueError, math.fmod, INF, 1.)
        self.assertRaises(ValueError, math.fmod, NINF, 1.)
        self.assertRaises(ValueError, math.fmod, INF, 0.)
        self.assertEquals(math.fmod(3.0, INF), 3.0)
        self.assertEquals(math.fmod(-3.0, INF), -3.0)
        self.assertEquals(math.fmod(3.0, NINF), 3.0)
        self.assertEquals(math.fmod(-3.0, NINF), -3.0)
        self.assertEquals(math.fmod(0.0, 3.0), 0.0)
        self.assertEquals(math.fmod(0.0, NINF), 0.0)
    def testFrexp(self):
        self.assertRaises(TypeError, math.frexp)
        # Python 2 tuple-unpacking parameters: compares a (mantissa,
        # exponent) pair; the mantissa uses the eps tolerance, the
        # exponent must match exactly.
        def testfrexp(name, (mant, exp), (emant, eexp)):
            if abs(mant-emant) > eps or exp != eexp:
                self.fail('%s returned %r, expected %r'%\
                          (name, (mant, exp), (emant,eexp)))
        testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
        testfrexp('frexp(0)', math.frexp(0), (0, 0))
        testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
        testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
        self.assertEquals(math.frexp(INF)[0], INF)
        self.assertEquals(math.frexp(NINF)[0], NINF)
        self.assert_(math.isnan(math.frexp(NAN)[0]))
    def testFsum(self):
        # Checks math.fsum against fixed hard cases and a randomized
        # comparison with an exact integer-arithmetic reference (msum).
        # math.fsum relies on exact rounding for correct operation.
        # There's a known problem with IA32 floating-point that causes
        # inexact rounding in some situations, and will cause the
        # math.fsum tests below to fail; see issue #2937. On non IEEE
        # 754 platforms, and on IEEE 754 platforms that exhibit the
        # problem described in issue #2937, we simply skip the whole
        # test.
        if not float.__getformat__("double").startswith("IEEE"):
            return
        # on IEEE 754 compliant machines, both of the expressions
        # below should round to 10000000000000002.0.
        if 1e16+2.0 != 1e16+2.9999:
            return
        # Python version of math.fsum, for comparison. Uses a
        # different algorithm based on frexp, ldexp and integer
        # arithmetic.
        from sys import float_info
        mant_dig = float_info.mant_dig
        etiny = float_info.min_exp - mant_dig
        def msum(iterable):
            """Full precision summation.  Compute sum(iterable) without any
            intermediate accumulation of error.  Based on the 'lsum' function
            at http://code.activestate.com/recipes/393090/

            """
            tmant, texp = 0, 0
            for x in iterable:
                mant, exp = math.frexp(x)
                mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
                if texp > exp:
                    tmant <<= texp-exp
                    texp = exp
                else:
                    mant <<= exp-texp
                tmant += mant
            # Round tmant * 2**texp to a float.  The original recipe
            # used float(str(tmant)) * 2.0**texp for this, but that's
            # a little unsafe because str -> float conversion can't be
            # relied upon to do correct rounding on all platforms.
            tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
            if tail > 0:
                h = 1 << (tail-1)
                tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
                texp += tail
            return math.ldexp(tmant, texp)
        # (input list, expected fsum result) pairs covering cancellation,
        # rounding-boundary and partials-array-resizing cases.
        test_values = [
            ([], 0.0),
            ([0.0], 0.0),
            ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
            ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
            ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
            ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
            ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
            ([1./n for n in range(1, 1001)],
             float.fromhex('0x1.df11f45f4e61ap+2')),
            ([(-1.)**n/n for n in range(1, 1001)],
             float.fromhex('-0x1.62a2af1bd3624p-1')),
            ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0),
            ([1e16, 1., 1e-16], 10000000000000002.0),
            ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
            # exercise code for resizing partials array
            ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
             [-2.**1022],
             float.fromhex('0x1.5555555555555p+970')),
            ]
        for i, (vals, expected) in enumerate(test_values):
            try:
                actual = math.fsum(vals)
            except OverflowError:
                self.fail("test %d failed: got OverflowError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            except ValueError:
                self.fail("test %d failed: got ValueError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            self.assertEqual(actual, expected)
        # Fuzz test: random heavy-cancellation inputs, fsum vs msum.
        from random import random, gauss, shuffle
        for j in xrange(1000):
            vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
            s = 0
            for i in xrange(200):
                v = gauss(0, random()) ** 7 - s
                s += v
                vals.append(v)
            shuffle(vals)
            # NOTE(review): this msum result is recomputed on the next
            # line; the assignment to 's' is redundant.
            s = msum(vals)
            self.assertEqual(msum(vals), math.fsum(vals))
    def testHypot(self):
        # hypot with any infinite argument is +inf even if the other is
        # NaN; otherwise NaN propagates.
        self.assertRaises(TypeError, math.hypot)
        self.ftest('hypot(0,0)', math.hypot(0,0), 0)
        self.ftest('hypot(3,4)', math.hypot(3,4), 5)
        self.assertEqual(math.hypot(NAN, INF), INF)
        self.assertEqual(math.hypot(INF, NAN), INF)
        self.assertEqual(math.hypot(NAN, NINF), INF)
        self.assertEqual(math.hypot(NINF, NAN), INF)
        self.assert_(math.isnan(math.hypot(1.0, NAN)))
        self.assert_(math.isnan(math.hypot(NAN, -2.0)))
    def testLdexp(self):
        # ldexp(x, n) == x * 2**n; overflow raises, underflow goes to a
        # signed zero; huge int/long exponents must also behave.
        self.assertRaises(TypeError, math.ldexp)
        self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
        self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
        self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
        self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
        self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
        self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
        self.assertEquals(math.ldexp(1., -1000000), 0.)
        self.assertEquals(math.ldexp(-1., -1000000), -0.)
        self.assertEquals(math.ldexp(INF, 30), INF)
        self.assertEquals(math.ldexp(NINF, -213), NINF)
        self.assert_(math.isnan(math.ldexp(NAN, 0)))
        # large second argument
        for n in [10**5, 10L**5, 10**10, 10L**10, 10**20, 10**40]:
            self.assertEquals(math.ldexp(INF, -n), INF)
            self.assertEquals(math.ldexp(NINF, -n), NINF)
            self.assertEquals(math.ldexp(1., -n), 0.)
            self.assertEquals(math.ldexp(-1., -n), -0.)
            self.assertEquals(math.ldexp(0., -n), 0.)
            self.assertEquals(math.ldexp(-0., -n), -0.)
            self.assert_(math.isnan(math.ldexp(NAN, -n)))
            self.assertRaises(OverflowError, math.ldexp, 1., n)
            self.assertRaises(OverflowError, math.ldexp, -1., n)
            self.assertEquals(math.ldexp(0., n), 0.)
            self.assertEquals(math.ldexp(-0., n), -0.)
            self.assertEquals(math.ldexp(INF, n), INF)
            self.assertEquals(math.ldexp(NINF, n), NINF)
            self.assert_(math.isnan(math.ldexp(NAN, n)))
    def testLog(self):
        # Natural log plus the two-argument base form (exact for long
        # powers); log of a negative/NINF raises, NaN propagates.
        self.assertRaises(TypeError, math.log)
        self.ftest('log(1/e)', math.log(1/math.e), -1)
        self.ftest('log(1)', math.log(1), 0)
        self.ftest('log(e)', math.log(math.e), 1)
        self.ftest('log(32,2)', math.log(32,2), 5)
        self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
        self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
        self.assertEquals(math.log(INF), INF)
        self.assertRaises(ValueError, math.log, NINF)
        self.assert_(math.isnan(math.log(NAN)))
    def testLog1p(self):
        # log1p is accurate near zero and must agree with log for large
        # arguments (including huge longs).
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEquals(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assert_(math.isnan(math.log1p(NAN)))
        n= 2**90
        self.assertAlmostEquals(math.log1p(n), 62.383246250395075)
        self.assertAlmostEquals(math.log1p(n), math.log1p(float(n)))
def testLog10(self):
self.assertRaises(TypeError, math.log10)
self.ftest('log10(0.1)', math.log10(0.1), -1)
self.ftest('log10(1)', math.log10(1), 0)
self.ftest('log10(10)', math.log10(10), 1)
self.assertEquals(math.log(INF), INF)
self.assertRaises(ValueError, math.log10, NINF)
self.assert_(math.isnan(math.log10(NAN)))
    def testModf(self):
        # modf splits x into (fractional, integral) parts, both floats
        # carrying the sign of x; infinities keep a signed-zero fraction.
        self.assertRaises(TypeError, math.modf)
        # Python 2 tuple-unpacking parameters.  NOTE(review): the second
        # comparison has no '> eps', so the integral part is compared
        # exactly (any nonzero difference fails) while the fractional
        # part gets the eps tolerance -- confirm this strictness is
        # intended before "fixing" it.
        def testmodf(name, (v1, v2), (e1, e2)):
            if abs(v1-e1) > eps or abs(v2-e2):
                self.fail('%s returned %r, expected %r'%\
                          (name, (v1,v2), (e1,e2)))
        testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
        testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
        self.assertEquals(math.modf(INF), (0.0, INF))
        self.assertEquals(math.modf(NINF), (-0.0, NINF))
        modf_nan = math.modf(NAN)
        self.assert_(math.isnan(modf_nan[0]))
        self.assert_(math.isnan(modf_nan[1]))
    def testPow(self):
        """pow(x, y): the full IEEE 754 special-case matrix.

        Exercises every documented combination of zeros (both signs),
        infinities, NaNs, +-1 and finite arguments, plus the rule that
        pow(x, +-0) is 1 for any x and that a negative base with a
        non-integral exponent is a domain error.
        """
        self.assertRaises(TypeError, math.pow)
        self.ftest('pow(0,1)', math.pow(0,1), 0)
        self.ftest('pow(1,0)', math.pow(1,0), 1)
        self.ftest('pow(2,1)', math.pow(2,1), 2)
        self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
        self.assertEqual(math.pow(INF, 1), INF)
        self.assertEqual(math.pow(NINF, 1), NINF)
        self.assertEqual((math.pow(1, INF)), 1.)
        self.assertEqual((math.pow(1, NINF)), 1.)
        self.assert_(math.isnan(math.pow(NAN, 1)))
        self.assert_(math.isnan(math.pow(2, NAN)))
        self.assert_(math.isnan(math.pow(0, NAN)))
        # pow(1, y) is 1 even for y = NaN, per IEEE 754.
        self.assertEqual(math.pow(1, NAN), 1)

        # pow(0., x)
        self.assertEqual(math.pow(0., INF), 0.)
        self.assertEqual(math.pow(0., 3.), 0.)
        self.assertEqual(math.pow(0., 2.3), 0.)
        self.assertEqual(math.pow(0., 2.), 0.)
        self.assertEqual(math.pow(0., 0.), 1.)
        self.assertEqual(math.pow(0., -0.), 1.)
        self.assertRaises(ValueError, math.pow, 0., -2.)
        self.assertRaises(ValueError, math.pow, 0., -2.3)
        self.assertRaises(ValueError, math.pow, 0., -3.)
        self.assertRaises(ValueError, math.pow, 0., NINF)
        self.assert_(math.isnan(math.pow(0., NAN)))

        # pow(INF, x)
        self.assertEqual(math.pow(INF, INF), INF)
        self.assertEqual(math.pow(INF, 3.), INF)
        self.assertEqual(math.pow(INF, 2.3), INF)
        self.assertEqual(math.pow(INF, 2.), INF)
        self.assertEqual(math.pow(INF, 0.), 1.)
        self.assertEqual(math.pow(INF, -0.), 1.)
        self.assertEqual(math.pow(INF, -2.), 0.)
        self.assertEqual(math.pow(INF, -2.3), 0.)
        self.assertEqual(math.pow(INF, -3.), 0.)
        self.assertEqual(math.pow(INF, NINF), 0.)
        self.assert_(math.isnan(math.pow(INF, NAN)))

        # pow(-0., x): the result carries the sign of zero for odd
        # integral exponents.
        self.assertEqual(math.pow(-0., INF), 0.)
        self.assertEqual(math.pow(-0., 3.), -0.)
        self.assertEqual(math.pow(-0., 2.3), 0.)
        self.assertEqual(math.pow(-0., 2.), 0.)
        self.assertEqual(math.pow(-0., 0.), 1.)
        self.assertEqual(math.pow(-0., -0.), 1.)
        self.assertRaises(ValueError, math.pow, -0., -2.)
        self.assertRaises(ValueError, math.pow, -0., -2.3)
        self.assertRaises(ValueError, math.pow, -0., -3.)
        self.assertRaises(ValueError, math.pow, -0., NINF)
        self.assert_(math.isnan(math.pow(-0., NAN)))

        # pow(NINF, x): odd integral exponents preserve the negative sign.
        self.assertEqual(math.pow(NINF, INF), INF)
        self.assertEqual(math.pow(NINF, 3.), NINF)
        self.assertEqual(math.pow(NINF, 2.3), INF)
        self.assertEqual(math.pow(NINF, 2.), INF)
        self.assertEqual(math.pow(NINF, 0.), 1.)
        self.assertEqual(math.pow(NINF, -0.), 1.)
        self.assertEqual(math.pow(NINF, -2.), 0.)
        self.assertEqual(math.pow(NINF, -2.3), 0.)
        self.assertEqual(math.pow(NINF, -3.), -0.)
        self.assertEqual(math.pow(NINF, NINF), 0.)
        self.assert_(math.isnan(math.pow(NINF, NAN)))

        # pow(-1, x)
        self.assertEqual(math.pow(-1., INF), 1.)
        self.assertEqual(math.pow(-1., 3.), -1.)
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertEqual(math.pow(-1., 2.), 1.)
        self.assertEqual(math.pow(-1., 0.), 1.)
        self.assertEqual(math.pow(-1., -0.), 1.)
        self.assertEqual(math.pow(-1., -2.), 1.)
        self.assertRaises(ValueError, math.pow, -1., -2.3)
        self.assertEqual(math.pow(-1., -3.), -1.)
        self.assertEqual(math.pow(-1., NINF), 1.)
        self.assert_(math.isnan(math.pow(-1., NAN)))

        # pow(1, x): always exactly 1, even for NaN exponents.
        self.assertEqual(math.pow(1., INF), 1.)
        self.assertEqual(math.pow(1., 3.), 1.)
        self.assertEqual(math.pow(1., 2.3), 1.)
        self.assertEqual(math.pow(1., 2.), 1.)
        self.assertEqual(math.pow(1., 0.), 1.)
        self.assertEqual(math.pow(1., -0.), 1.)
        self.assertEqual(math.pow(1., -2.), 1.)
        self.assertEqual(math.pow(1., -2.3), 1.)
        self.assertEqual(math.pow(1., -3.), 1.)
        self.assertEqual(math.pow(1., NINF), 1.)
        self.assertEqual(math.pow(1., NAN), 1.)

        # pow(x, 0) should be 1 for any x
        self.assertEqual(math.pow(2.3, 0.), 1.)
        self.assertEqual(math.pow(-2.3, 0.), 1.)
        self.assertEqual(math.pow(NAN, 0.), 1.)
        self.assertEqual(math.pow(2.3, -0.), 1.)
        self.assertEqual(math.pow(-2.3, -0.), 1.)
        self.assertEqual(math.pow(NAN, -0.), 1.)

        # pow(x, y) is invalid if x is negative and y is not integral
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertRaises(ValueError, math.pow, -15., -3.1)

        # pow(x, NINF): |x| > 1 underflows to 0, |x| < 1 blows up to inf.
        self.assertEqual(math.pow(1.9, NINF), 0.)
        self.assertEqual(math.pow(1.1, NINF), 0.)
        self.assertEqual(math.pow(0.9, NINF), INF)
        self.assertEqual(math.pow(0.1, NINF), INF)
        self.assertEqual(math.pow(-0.1, NINF), INF)
        self.assertEqual(math.pow(-0.9, NINF), INF)
        self.assertEqual(math.pow(-1.1, NINF), 0.)
        self.assertEqual(math.pow(-1.9, NINF), 0.)

        # pow(x, INF): the mirror image of the NINF cases above.
        self.assertEqual(math.pow(1.9, INF), INF)
        self.assertEqual(math.pow(1.1, INF), INF)
        self.assertEqual(math.pow(0.9, INF), 0.)
        self.assertEqual(math.pow(0.1, INF), 0.)
        self.assertEqual(math.pow(-0.1, INF), 0.)
        self.assertEqual(math.pow(-0.9, INF), 0.)
        self.assertEqual(math.pow(-1.1, INF), INF)
        self.assertEqual(math.pow(-1.9, INF), INF)

        # pow(x, y) should work for x negative, y an integer
        self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
        self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
        self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
        self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
        self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
        self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
        self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
        self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
        self.assertRaises(ValueError, math.pow, -2.0, -0.5)
        self.assertRaises(ValueError, math.pow, -2.0, 0.5)

        # the following tests have been commented out since they don't
        # really belong here: the implementation of ** for floats is
        # independent of the implemention of math.pow
        #self.assertEqual(1**NAN, 1)
        #self.assertEqual(1**INF, 1)
        #self.assertEqual(1**NINF, 1)
        #self.assertEqual(1**0, 1)
        #self.assertEqual(1.**NAN, 1)
        #self.assertEqual(1.**INF, 1)
        #self.assertEqual(1.**NINF, 1)
        #self.assertEqual(1.**0, 1)
def testRadians(self):
self.assertRaises(TypeError, math.radians)
self.ftest('radians(180)', math.radians(180), math.pi)
self.ftest('radians(90)', math.radians(90), math.pi/2)
self.ftest('radians(-45)', math.radians(-45), -math.pi/4)
    def testSin(self):
        """sin(x): reference angles; behaviour at +-inf is platform-dependent."""
        self.assertRaises(TypeError, math.sin)
        self.ftest('sin(0)', math.sin(0), 0)
        self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
        self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
        # The platform libm either yields NaN for sin(+-inf) or Python
        # raises ValueError; accept either behaviour.
        try:
            self.assert_(math.isnan(math.sin(INF)))
            self.assert_(math.isnan(math.sin(NINF)))
        except ValueError:
            self.assertRaises(ValueError, math.sin, INF)
            self.assertRaises(ValueError, math.sin, NINF)
        self.assert_(math.isnan(math.sin(NAN)))
    def testSinh(self):
        """sinh(x): hyperbolic identities, odd symmetry, IEEE specials."""
        self.assertRaises(TypeError, math.sinh)
        self.ftest('sinh(0)', math.sinh(0), 0)
        # Identity: sinh(x)**2 - cosh(x)**2 == -1.
        self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
        self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
        self.assertEquals(math.sinh(INF), INF)
        self.assertEquals(math.sinh(NINF), NINF)
        self.assert_(math.isnan(math.sinh(NAN)))
    def testSqrt(self):
        """sqrt(x): perfect squares and IEEE special values."""
        self.assertRaises(TypeError, math.sqrt)
        self.ftest('sqrt(0)', math.sqrt(0), 0)
        self.ftest('sqrt(1)', math.sqrt(1), 1)
        self.ftest('sqrt(4)', math.sqrt(4), 2)
        self.assertEquals(math.sqrt(INF), INF)
        # sqrt of a negative (here -inf) is a domain error.
        self.assertRaises(ValueError, math.sqrt, NINF)
        self.assert_(math.isnan(math.sqrt(NAN)))
def testTan(self):
self.assertRaises(TypeError, math.tan)
self.ftest('tan(0)', math.tan(0), 0)
self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
try:
self.assert_(math.isnan(math.tan(INF)))
self.assert_(math.isnan(math.tan(NINF)))
except:
self.assertRaises(ValueError, math.tan, INF)
self.assertRaises(ValueError, math.tan, NINF)
self.assert_(math.isnan(math.tan(NAN)))
    def testTanh(self):
        """tanh(x): odd symmetry, saturation at +-1, signed-zero handling."""
        self.assertRaises(TypeError, math.tanh)
        self.ftest('tanh(0)', math.tanh(0), 0)
        self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
        self.ftest('tanh(inf)', math.tanh(INF), 1)
        self.ftest('tanh(-inf)', math.tanh(NINF), -1)
        self.assert_(math.isnan(math.tanh(NAN)))
        # check that tanh(-0.) == -0. on IEEE 754 systems
        if float.__getformat__("double").startswith("IEEE"):
            self.assertEqual(math.tanh(-0.), -0.)
            # copysign distinguishes -0. from +0. where == cannot.
            self.assertEqual(math.copysign(1., math.tanh(-0.)),
                             math.copysign(1., -0.))
    def test_trunc(self):
        """trunc(x): truncation toward zero and the __trunc__ protocol."""
        self.assertEqual(math.trunc(1), 1)
        self.assertEqual(math.trunc(-1), -1)
        # trunc always returns an int, even for float input.
        self.assertEqual(type(math.trunc(1)), int)
        self.assertEqual(type(math.trunc(1.5)), int)
        self.assertEqual(math.trunc(1.5), 1)
        self.assertEqual(math.trunc(-1.5), -1)
        self.assertEqual(math.trunc(1.999999), 1)
        self.assertEqual(math.trunc(-1.999999), -1)
        self.assertEqual(math.trunc(-0.999999), -0)
        self.assertEqual(math.trunc(-100.999), -100)
        # Objects participate via the __trunc__ protocol.
        class TestTrunc(object):
            def __trunc__(self):
                return 23
        class TestNoTrunc(object):
            pass
        self.assertEqual(math.trunc(TestTrunc()), 23)
        self.assertRaises(TypeError, math.trunc)
        self.assertRaises(TypeError, math.trunc, 1, 2)
        # XXX: This is not ideal, but see the comment in math_trunc().
        self.assertRaises(AttributeError, math.trunc, TestNoTrunc())
        # An instance attribute __trunc__ is honoured here as well.
        t = TestNoTrunc()
        t.__trunc__ = lambda *args: args
        self.assertEquals((), math.trunc(t))
        self.assertRaises(TypeError, math.trunc, t, 0)
    def testCopysign(self):
        """copysign(x, y): magnitude of x with the sign of y, incl. -0.0."""
        self.assertEqual(math.copysign(1, 42), 1.0)
        self.assertEqual(math.copysign(0., 42), 0.0)
        self.assertEqual(math.copysign(1., -42), -1.0)
        self.assertEqual(math.copysign(3, 0.), 3.0)
        # Negative zero transfers its sign.
        self.assertEqual(math.copysign(4., -0.), -4.0)
    def testIsnan(self):
        """isnan(x): true only for NaN (including inf * 0)."""
        self.assert_(math.isnan(float("nan")))
        # inf * 0 is an IEEE invalid operation and produces NaN.
        self.assert_(math.isnan(float("inf")* 0.))
        self.failIf(math.isnan(float("inf")))
        self.failIf(math.isnan(0.))
        self.failIf(math.isnan(1.))
    def testIsinf(self):
        """isinf(x): true for +-inf, false for NaN and finite values."""
        self.assert_(math.isinf(float("inf")))
        self.assert_(math.isinf(float("-inf")))
        # 1E400 overflows a C double and becomes infinity at parse time.
        self.assert_(math.isinf(1E400))
        self.assert_(math.isinf(-1E400))
        self.failIf(math.isinf(float("nan")))
        self.failIf(math.isinf(0.))
        self.failIf(math.isinf(1.))
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
    # Only defined in verbose mode: some platforms still fail parts of it
    # (see the RED_FLAG note above).
    if verbose:
        def test_exceptions(self):
            """Exercise overflow/underflow/domain exception behaviour."""
            try:
                x = math.exp(-1000000000)
            except:
                # mathmodule.c is failing to weed out underflows from libm, or
                # we've got an fp format with huge dynamic range
                self.fail("underflowing exp() should not have raised "
                          "an exception")
            if x != 0:
                self.fail("underflowing exp() should have returned 0")
            # If this fails, probably using a strict IEEE-754 conforming libm, and x
            # is +Inf afterwards.  But Python wants overflows detected by default.
            try:
                x = math.exp(1000000000)
            except OverflowError:
                pass
            else:
                self.fail("overflowing exp() didn't trigger OverflowError")
            # If this fails, it could be a puzzle.  One odd possibility is that
            # mathmodule.c's macros are getting confused while comparing
            # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
            # as a result (and so raising OverflowError instead).
            try:
                x = math.sqrt(-1.0)
            except ValueError:
                pass
            else:
                self.fail("sqrt(-1) didn't raise ValueError")
    def test_testfile(self):
        """Run the real-valued cases from the shared math test-data file."""
        # The reference data assumes IEEE 754 doubles.
        if not float.__getformat__("double").startswith("IEEE"):
            return
        for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
            # Skip if either the input or result is complex, or if
            # flags is nonempty
            if ai != 0. or ei != 0. or flags:
                continue
            if fn in ['rect', 'polar']:
                # no real versions of rect, polar
                continue
            func = getattr(math, fn)
            try:
                result = func(ar)
            except ValueError:
                message = ("Unexpected ValueError in " +
                           "test %s:%s(%r)\n" % (id, fn, ar))
                self.fail(message)
            except OverflowError:
                message = ("Unexpected OverflowError in " +
                           "test %s:%s(%r)\n" % (id, fn, ar))
                self.fail(message)
            self.ftest("%s:%s(%r)" % (id, fn, ar), result, er)
def test_main():
    """Run the math unit tests together with the ieee754.txt doctest file."""
    from doctest import DocFileSuite
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(MathTests))
    all_tests.addTest(DocFileSuite("ieee754.txt"))
    run_unittest(all_tests)
if __name__ == '__main__':
test_main()
| apache-2.0 |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py | 18 | 2184 | # mssql/zxjdbc.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import MSDialect, MSExecutionContext
from ... import engine
class MSExecutionContext_zxjdbc(MSExecutionContext):
    """MSSQL execution context for the zxJDBC (Jython/jTDS) driver."""

    # Set True by pre_exec() when "; SELECT scope_identity()" has been
    # appended to the statement, so post_exec() knows to fetch the id
    # from the trailing result set of the same batch.
    _embedded_scope_identity = False

    def pre_exec(self):
        """Embed scope_identity() into INSERTs that need the last row id.

        jTDS returns NULL when scope_identity() is queried after the fact
        (see comment below), so it must be part of the same statement.
        """
        super(MSExecutionContext_zxjdbc, self).pre_exec()
        # scope_identity after the fact returns null in jTDS so we must
        # embed it
        if self._select_lastrowid and self.dialect.use_scope_identity:
            self._embedded_scope_identity = True
            self.statement += "; SELECT scope_identity()"

    def post_exec(self):
        """Harvest the embedded identity value and finish statement cleanup."""
        if self._embedded_scope_identity:
            # Skip over result sets raised as DBAPI errors until the
            # scope_identity() row from the appended SELECT is reachable.
            while True:
                try:
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error:
                    self.cursor.nextset()
            self._lastrowid = int(row[0])
        # RETURNING on DML must be fully buffered before the cursor moves on.
        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            self._result_proxy = engine.FullyBufferedResultProxy(self)
        # Undo the IDENTITY_INSERT toggle enabled earlier for explicit ids.
        if self._enable_identity_insert:
            table = self.dialect.identifier_preparer.format_table(
                self.compiled.statement.table)
            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
    """SQLAlchemy dialect for MSSQL over zxJDBC using the jTDS JDBC driver."""

    # JDBC URL pieces consumed by ZxJDBCConnector.
    jdbc_db_name = 'jtds:sqlserver'
    jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'

    execution_ctx_cls = MSExecutionContext_zxjdbc

    def _get_server_version_info(self, connection):
        """Return the server version as a tuple of ints parsed from the
        driver's dotted ``dbversion`` string."""
        return tuple(
            int(x)
            for x in connection.connection.dbversion.split('.')
        )
dialect = MSDialect_zxjdbc
| mit |
simudream/django-sql-explorer | explorer/south_migrations/0003_auto__del_field_query_created_by__add_field_query_created_by_user.py | 8 | 6728 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Replace the free-text 'created_by' column with a user ForeignKey.

        Note: this drops the old column's data; there is no data migration.
        """
        # Deleting field 'Query.created_by'
        db.delete_column(u'explorer_query', 'created_by')

        # Adding field 'Query.created_by_user'
        # Nullable so existing rows remain valid after the column is added.
        db.add_column(u'explorer_query', 'created_by_user',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model], null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Restore the free-text 'created_by' column and drop the user FK.

        Note: the original text values are not recoverable on rollback.
        """
        # Adding field 'Query.created_by'
        db.add_column(u'explorer_query', 'created_by',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

        # Deleting field 'Query.created_by_user'
        db.delete_column(u'explorer_query', 'created_by_user_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
user_model: {
'Meta': {'object_name': user_model.split(".")[1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'explorer.query': {
'Meta': {'ordering': "['title']", 'object_name': 'Query'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_run_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sql': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['explorer'] | mit |
Distrotech/PyQt-x11 | examples/phonon/musicplayer.py | 20 | 12411 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QString', 2)
import sys
from PyQt4 import QtCore, QtGui
try:
from PyQt4.phonon import Phonon
except ImportError:
app = QtGui.QApplication(sys.argv)
QtGui.QMessageBox.critical(None, "Music Player",
"Your Qt installation does not have Phonon support.",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
sys.exit(1)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(QtGui.QMainWindow, self).__init__()
self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
self.mediaObject = Phonon.MediaObject(self)
self.metaInformationResolver = Phonon.MediaObject(self)
self.mediaObject.setTickInterval(1000)
self.mediaObject.tick.connect(self.tick)
self.mediaObject.stateChanged.connect(self.stateChanged)
self.metaInformationResolver.stateChanged.connect(self.metaStateChanged)
self.mediaObject.currentSourceChanged.connect(self.sourceChanged)
self.mediaObject.aboutToFinish.connect(self.aboutToFinish)
Phonon.createPath(self.mediaObject, self.audioOutput)
self.setupActions()
self.setupMenus()
self.setupUi()
self.timeLcd.display("00:00")
self.sources = []
    def sizeHint(self):
        """Suggest a default window size of 500x300 pixels."""
        return QtCore.QSize(500, 300)
    def addFiles(self):
        """Prompt for music files and queue them for metadata resolution."""
        files = QtGui.QFileDialog.getOpenFileNames(self, "Select Music Files",
                QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.MusicLocation))

        if not files:
            return

        # Remember where the new entries begin; metaStateChanged() walks
        # forward from this index as each source's metadata resolves.
        index = len(self.sources)

        for string in files:
            self.sources.append(Phonon.MediaSource(string))

        if self.sources:
            self.metaInformationResolver.setCurrentSource(self.sources[index])
    def about(self):
        """Show the 'About' message box describing the example."""
        QtGui.QMessageBox.information(self, "About Music Player",
                "The Music Player example shows how to use Phonon - the "
                "multimedia framework that comes with Qt - to create a "
                "simple music player.")
    def stateChanged(self, newState, oldState):
        """Sync transport actions (and error dialogs) with the new state."""
        if newState == Phonon.ErrorState:
            if self.mediaObject.errorType() == Phonon.FatalError:
                QtGui.QMessageBox.warning(self, "Fatal Error",
                        self.mediaObject.errorString())
            else:
                QtGui.QMessageBox.warning(self, "Error",
                        self.mediaObject.errorString())
        elif newState == Phonon.PlayingState:
            self.playAction.setEnabled(False)
            self.pauseAction.setEnabled(True)
            self.stopAction.setEnabled(True)
        elif newState == Phonon.StoppedState:
            self.stopAction.setEnabled(False)
            self.playAction.setEnabled(True)
            self.pauseAction.setEnabled(False)
            # Reset the elapsed-time display when playback stops.
            self.timeLcd.display("00:00")
        elif newState == Phonon.PausedState:
            self.pauseAction.setEnabled(False)
            self.stopAction.setEnabled(True)
            self.playAction.setEnabled(True)
def tick(self, time):
displayTime = QtCore.QTime(0, (time / 60000) % 60, (time / 1000) % 60)
self.timeLcd.display(displayTime.toString('mm:ss'))
    def tableClicked(self, row, column):
        """Switch playback to the track on the clicked table row."""
        # Capture state before stop() so we know whether to resume.
        wasPlaying = (self.mediaObject.state() == Phonon.PlayingState)

        self.mediaObject.stop()
        self.mediaObject.clearQueue()

        self.mediaObject.setCurrentSource(self.sources[row])

        if wasPlaying:
            self.mediaObject.play()
        else:
            self.mediaObject.stop()
    def sourceChanged(self, source):
        """Keep the table selection and time display in sync with playback."""
        self.musicTable.selectRow(self.sources.index(source))
        self.timeLcd.display('00:00')
    def metaStateChanged(self, newState, oldState):
        """Append a table row as the resolver finishes each source's metadata,
        then advance the resolver to the next queued source."""
        if newState == Phonon.ErrorState:
            QtGui.QMessageBox.warning(self, "Error opening files",
                    self.metaInformationResolver.errorString())
            # Drop the failed source and everything queued after it.
            while self.sources and self.sources.pop() != self.metaInformationResolver.currentSource():
                pass
            return

        # Metadata is only reliable once the resolver has settled.
        if newState != Phonon.StoppedState and newState != Phonon.PausedState:
            return

        if self.metaInformationResolver.currentSource().type() == Phonon.MediaSource.Invalid:
            return

        metaData = self.metaInformationResolver.metaData()

        # Fall back to the file name when no TITLE tag is present.
        title = metaData.get('TITLE', [''])[0]
        if not title:
            title = self.metaInformationResolver.currentSource().fileName()

        titleItem = QtGui.QTableWidgetItem(title)
        titleItem.setFlags(titleItem.flags() ^ QtCore.Qt.ItemIsEditable)

        artist = metaData.get('ARTIST', [''])[0]
        artistItem = QtGui.QTableWidgetItem(artist)
        artistItem.setFlags(artistItem.flags() ^ QtCore.Qt.ItemIsEditable)

        album = metaData.get('ALBUM', [''])[0]
        albumItem = QtGui.QTableWidgetItem(album)
        albumItem.setFlags(albumItem.flags() ^ QtCore.Qt.ItemIsEditable)

        year = metaData.get('DATE', [''])[0]
        yearItem = QtGui.QTableWidgetItem(year)
        yearItem.setFlags(yearItem.flags() ^ QtCore.Qt.ItemIsEditable)

        currentRow = self.musicTable.rowCount()
        self.musicTable.insertRow(currentRow)
        self.musicTable.setItem(currentRow, 0, titleItem)
        self.musicTable.setItem(currentRow, 1, artistItem)
        self.musicTable.setItem(currentRow, 2, albumItem)
        self.musicTable.setItem(currentRow, 3, yearItem)

        # First resolved track: select it and make it the current source.
        if not self.musicTable.selectedItems():
            self.musicTable.selectRow(0)
            self.mediaObject.setCurrentSource(self.metaInformationResolver.currentSource())

        # Resolve the next queued source, or finish by sizing the columns.
        index = self.sources.index(self.metaInformationResolver.currentSource()) + 1

        if len(self.sources) > index:
            self.metaInformationResolver.setCurrentSource(self.sources[index])
        else:
            self.musicTable.resizeColumnsToContents()
            if self.musicTable.columnWidth(0) > 300:
                self.musicTable.setColumnWidth(0, 300)
    def aboutToFinish(self):
        """Queue the next track for gapless playback, if one exists."""
        index = self.sources.index(self.mediaObject.currentSource()) + 1

        if len(self.sources) > index:
            self.mediaObject.enqueue(self.sources[index])
    def setupActions(self):
        """Create the playback, file and help actions (icons + shortcuts)."""
        self.playAction = QtGui.QAction(
                self.style().standardIcon(QtGui.QStyle.SP_MediaPlay), "Play",
                self, shortcut="Ctrl+P", enabled=False,
                triggered=self.mediaObject.play)

        self.pauseAction = QtGui.QAction(
                self.style().standardIcon(QtGui.QStyle.SP_MediaPause),
                "Pause", self, shortcut="Ctrl+A", enabled=False,
                triggered=self.mediaObject.pause)

        self.stopAction = QtGui.QAction(
                self.style().standardIcon(QtGui.QStyle.SP_MediaStop), "Stop",
                self, shortcut="Ctrl+S", enabled=False,
                triggered=self.mediaObject.stop)

        # Next/Previous are created with shortcuts but not connected to
        # any slot in this example.
        self.nextAction = QtGui.QAction(
                self.style().standardIcon(QtGui.QStyle.SP_MediaSkipForward),
                "Next", self, shortcut="Ctrl+N")

        self.previousAction = QtGui.QAction(
                self.style().standardIcon(QtGui.QStyle.SP_MediaSkipBackward),
                "Previous", self, shortcut="Ctrl+R")

        self.addFilesAction = QtGui.QAction("Add &Files", self,
                shortcut="Ctrl+F", triggered=self.addFiles)

        self.exitAction = QtGui.QAction("E&xit", self, shortcut="Ctrl+X",
                triggered=self.close)

        self.aboutAction = QtGui.QAction("A&bout", self, shortcut="Ctrl+B",
                triggered=self.about)

        self.aboutQtAction = QtGui.QAction("About &Qt", self,
                shortcut="Ctrl+Q", triggered=QtGui.qApp.aboutQt)
    def setupMenus(self):
        """Build the File and Help menus from the actions created earlier."""
        fileMenu = self.menuBar().addMenu("&File")
        fileMenu.addAction(self.addFilesAction)
        fileMenu.addSeparator()
        fileMenu.addAction(self.exitAction)

        aboutMenu = self.menuBar().addMenu("&Help")
        aboutMenu.addAction(self.aboutAction)
        aboutMenu.addAction(self.aboutQtAction)
    def setupUi(self):
        """Assemble the toolbar, sliders, LCD and track table into the window."""
        bar = QtGui.QToolBar()

        bar.addAction(self.playAction)
        bar.addAction(self.pauseAction)
        bar.addAction(self.stopAction)

        # Phonon widgets bind directly to the media/audio objects.
        self.seekSlider = Phonon.SeekSlider(self)
        self.seekSlider.setMediaObject(self.mediaObject)

        self.volumeSlider = Phonon.VolumeSlider(self)
        self.volumeSlider.setAudioOutput(self.audioOutput)
        self.volumeSlider.setSizePolicy(QtGui.QSizePolicy.Maximum,
                QtGui.QSizePolicy.Maximum)

        volumeLabel = QtGui.QLabel()
        volumeLabel.setPixmap(QtGui.QPixmap('images/volume.png'))

        palette = QtGui.QPalette()
        palette.setBrush(QtGui.QPalette.Light, QtCore.Qt.darkGray)

        self.timeLcd = QtGui.QLCDNumber()
        self.timeLcd.setPalette(palette)

        headers = ("Title", "Artist", "Album", "Year")

        # One row per entry in self.sources; rows are read-only items.
        self.musicTable = QtGui.QTableWidget(0, 4)
        self.musicTable.setHorizontalHeaderLabels(headers)
        self.musicTable.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.musicTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.musicTable.cellPressed.connect(self.tableClicked)

        seekerLayout = QtGui.QHBoxLayout()
        seekerLayout.addWidget(self.seekSlider)
        seekerLayout.addWidget(self.timeLcd)

        playbackLayout = QtGui.QHBoxLayout()
        playbackLayout.addWidget(bar)
        playbackLayout.addStretch()
        playbackLayout.addWidget(volumeLabel)
        playbackLayout.addWidget(self.volumeSlider)

        mainLayout = QtGui.QVBoxLayout()
        mainLayout.addWidget(self.musicTable)
        mainLayout.addLayout(seekerLayout)
        mainLayout.addLayout(playbackLayout)

        widget = QtGui.QWidget()
        widget.setLayout(mainLayout)

        self.setCentralWidget(widget)
        self.setWindowTitle("Phonon Music Player")
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.setApplicationName("Music Player")
app.setQuitOnLastWindowClosed(True)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| gpl-2.0 |
noba3/KoTos | addons/plugin.image.picasa/gdata/tlslite/utils/Crypto/SelfTest/Signature/test_pkcs1_15.py | 113 | 9468 | # -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_15.py: Self-test for PKCS#1 v1.5 signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto import Random
from Crypto.Signature import PKCS1_v1_5 as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
    """Duck-typed string check: 1 if *s* can be concatenated onto a str,
    0 otherwise (kept as ints to preserve the original return values)."""
    try:
        '' + s
    except TypeError:
        return 0
    return 1
def rws(t):
    """Remove white spaces, tabs, and new lines from a string."""
    for ch in ('\n', '\t', ' '):
        t = t.replace(ch, '')
    return t
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string.

    Whitespace is stripped first, so the hex may be formatted in columns.
    Raises ValueError if an odd number of hex digits remains.
    """
    clean = b(rws(t))
    if len(clean)%2 == 1:
        raise ValueError("Even number of characters expected")
    return a2b_hex(clean)
class PKCS1_15_Tests(unittest.TestCase):
    """Known-answer and round-trip tests for PKCS#1 v1.5 RSA signatures."""

    # List of tuples with test data for PKCS#1 v1.5.
    # Each tuple is made up by:
    #   Item #0: dictionary with RSA key component, or key to import
    #   Item #1: data to hash and sign
    #   Item #2: signature of the data #1, done with the key #0, after
    #            hashing it with #3
    #   Item #3: hash object generator
    _testData = (

            #
            # Taken from ftp://ftp.rsa.com/pub/pkcs/ascii/examples.asc
            # "Some Examples of the PKCS Standards", 1999
            #
            (
            # Private key, from 2.1
            {
                'n':'''0a 66 79 1d c6 98 81 68 de 7a b7 74 19 bb 7f b0 c0 01 c6
                27 10 27 00 75 14 29 42 e1 9a 8d 8c 51 d0 53 b3 e3 78 2a 1d
                e5 dc 5a f4 eb e9 94 68 17 01 14 a1 df e6 7c dc 9a 9a f5 5d
                65 56 20 bb ab''',
                'e':'''01 00
                01''',
                'd':'''01 23 c5 b6 1b a3 6e db 1d 36 79 90 41 99 a8 9e a8 0c 09
                b9 12 2e 14 00 c0 9a dc f7 78 46 76 d0 1d 23 35 6a 7d 44 d6
                bd 8b d5 0e 94 bf c7 23 fa 87 d8 86 2b 75 17 76 91 c1 1d 75
                76 92 df 88 81'''
            },
            # Data to sign, from 3.1
            '''30 81 a4 02 01 00 30 42 31 0b 30 09 06
            03 55 04 06 13 02 55 53 31 1d 30 1b 06 03 55 04 0a 13 14
            45 78 61 6d 70 6c 65 20 4f 72 67 61 6e 69 7a 61 74 69 6f
            6e 31 14 30 12 06 03 55 04 03 13 0b 54 65 73 74 20 55 73
            65 72 20 31 30 5b 30 0d 06 09 2a 86 48 86 f7 0d 01 01 01
            05 00 03 4a 00 30 47 02 40
            0a 66 79 1d c6 98 81 68 de 7a b7 74 19 bb 7f b0
            c0 01 c6 27 10 27 00 75 14 29 42 e1 9a 8d 8c 51
            d0 53 b3 e3 78 2a 1d e5 dc 5a f4 eb e9 94 68 17
            01 14 a1 df e6 7c dc 9a 9a f5 5d 65 56 20 bb ab
            02 03 01 00 01''',
            # Signature, from 3.2 (at the very end)
            '''06 db 36 cb 18 d3 47 5b 9c 01 db 3c 78 95 28 08
            02 79 bb ae ff 2b 7d 55 8e d6 61 59 87 c8 51 86
            3f 8a 6c 2c ff bc 89 c3 f7 5a 18 d9 6b 12 7c 71
            7d 54 d0 d8 04 8d a8 a0 54 46 26 d1 7a 2a 8f be''',
            MD2
            ),

            #
            # RSA keypair generated with openssl
            #
            (
            """-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII
q19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8
Wojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI
OQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr
+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK
JACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9
n0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ==
-----END RSA PRIVATE KEY-----""",
            "This is a test\x0a",
            #
            # PKCS#1 signature computed with openssl
            #
            '''4a700a16432a291a3194646952687d5316458b8b86fb0a25aa30e0dcecdb
            442676759ac63d56ec1499c3ae4c0013c2053cabd5b5804848994541ac16
            fa243a4d''',
            SHA
            ),

            #
            # Test vector from http://www.di-mgt.com.au/rsa_alg.html#signpkcs1
            #
            (
            {
                'n':'''E08973398DD8F5F5E88776397F4EB005BB5383DE0FB7ABDC7DC775290D052E6D
                12DFA68626D4D26FAA5829FC97ECFA82510F3080BEB1509E4644F12CBBD832CF
                C6686F07D9B060ACBEEE34096A13F5F7050593DF5EBA3556D961FF197FC981E6
                F86CEA874070EFAC6D2C749F2DFA553AB9997702A648528C4EF357385774575F''',
                'e':'''010001''',
                'd':'''00A403C327477634346CA686B57949014B2E8AD2C862B2C7D748096A8B91F736
                F275D6E8CD15906027314735644D95CD6763CEB49F56AC2F376E1CEE0EBF282D
                F439906F34D86E085BD5656AD841F313D72D395EFE33CBFF29E4030B3D05A28F
                B7F18EA27637B07957D32F2BDE8706227D04665EC91BAF8B1AC3EC9144AB7F21'''
            },
            "abc",
            '''60AD5A78FB4A4030EC542C8974CD15F55384E836554CEDD9A322D5F4135C6267
            A9D20970C54E6651070B0144D43844C899320DD8FA7819F7EBC6A7715287332E
            C8675C136183B3F8A1F81EF969418267130A756FDBB2C71D9A667446E34E0EAD
            9CF31BFB66F816F319D0B7E430A5F2891553986E003720261C7E9022C0D9F11F''',
            SHA
            )
    )

    def testSign1(self):
        """Sign each vector with the private key and compare to the known answer."""
        for i in range(len(self._testData)):
            row = self._testData[i]
            # Build the key: row[0] is either a PEM string or a dict of
            # hex-encoded components (n, e, d).
            if isStr(row[0]):
                key = RSA.importKey(row[0])
            else:
                comps = [ long(rws(row[0][x]),16) for x in ('n','e','d') ]
                key = RSA.construct(comps)
            h = row[3].new()
            # Data to sign can either be in hex form or not
            try:
                h.update(t2b(row[1]))
            except:
                h.update(b(row[1]))
            # The real test: a private key must report itself signing-capable
            # and produce exactly the expected signature bytes.
            signer = PKCS.new(key)
            self.failUnless(signer.can_sign())
            s = signer.sign(h)
            self.assertEqual(s, t2b(row[2]))

    def testVerify1(self):
        """Verify each known-answer signature with the public key only."""
        for i in range(len(self._testData)):
            row = self._testData[i]
            # Build the key: public part only (no 'd'), so can_sign() is False.
            if isStr(row[0]):
                key = RSA.importKey(row[0]).publickey()
            else:
                comps = [ long(rws(row[0][x]),16) for x in ('n','e') ]
                key = RSA.construct(comps)
            h = row[3].new()
            # Data to sign can either be in hex form or not
            try:
                h.update(t2b(row[1]))
            except:
                h.update(b(row[1]))
            # The real test
            verifier = PKCS.new(key)
            self.failIf(verifier.can_sign())
            result = verifier.verify(h, t2b(row[2]))
            self.failUnless(result)

    def testSignVerify(self):
        """Round-trip sign/verify with a freshly generated key, for every hash."""
        rng = Random.new().read
        key = RSA.generate(1024, rng)
        for hashmod in (MD2,MD5,SHA,SHA224,SHA256,SHA384,SHA512,RIPEMD):
            h = hashmod.new()
            h.update(b('blah blah blah'))
            signer = PKCS.new(key)
            s = signer.sign(h)
            result = signer.verify(h, s)
            self.failUnless(result)
def get_tests(config={}):
    """Return this module's test cases as a fresh list (config is unused)."""
    return list(list_test_cases(PKCS1_15_Tests))
# Allow running this module directly: expose a 'suite' callable so that
# unittest.main can locate it via defaultTest.
if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
EDUlib/edx-platform | cms/djangoapps/course_creators/tests/test_admin.py | 4 | 8373 | """
Tests course_creators.admin.py.
"""
from unittest import mock
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase
from cms.djangoapps.course_creators.admin import CourseCreatorAdmin
from cms.djangoapps.course_creators.models import CourseCreator
from common.djangoapps.student import auth
from common.djangoapps.student.roles import CourseCreatorRole
def mock_render_to_string(template_name, context):
    """Return a deterministic string encoding template_name and context.

    Stands in for Django's render_to_string so tests can assert on the
    exact (template, context) pair without touching real templates.
    """
    return repr((template_name, context))
class CourseCreatorAdminTest(TestCase):
    """
    Tests for course creator admin.
    """

    def setUp(self):
        """ Test case setup """
        super().setUp()
        # A regular user with a pending CourseCreator table entry.
        self.user = User.objects.create_user('test_user', '[email protected]', 'foo')
        self.table_entry = CourseCreator(user=self.user)
        self.table_entry.save()
        # A staff user who performs the admin actions.
        self.admin = User.objects.create_user('Mark', '[email protected]', 'foo')
        self.admin.is_staff = True
        self.request = HttpRequest()
        self.request.user = self.admin
        self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())
        self.studio_request_email = '[email protected]'
        # Feature flags applied via mock.patch.dict in the individual tests.
        self.enable_creator_group_patch = {
            "ENABLE_CREATOR_GROUP": True,
            "STUDIO_REQUEST_EMAIL": self.studio_request_email
        }

    @mock.patch(
        'cms.djangoapps.course_creators.admin.render_to_string',
        mock.Mock(side_effect=mock_render_to_string, autospec=True)
    )
    @mock.patch('django.contrib.auth.models.User.email_user')
    def test_change_status(self, email_user):
        """
        Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
        """

        def change_state_and_verify_email(state, is_creator):
            """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
            self._change_state(state)
            self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole()))

            context = {'studio_request_email': self.studio_request_email}
            if state == CourseCreator.GRANTED:
                template = 'emails/course_creator_granted.txt'
            elif state == CourseCreator.DENIED:
                template = 'emails/course_creator_denied.txt'
            else:
                template = 'emails/course_creator_revoked.txt'
            # render_to_string is mocked (see decorator), so the expected
            # subject/body are the deterministic mock encodings.
            email_user.assert_called_with(
                mock_render_to_string('emails/course_creator_subject.txt', context),
                mock_render_to_string(template, context),
                self.studio_request_email
            )

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # User is initially unrequested.
            self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))

            # Exercise every interesting state transition in sequence.
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.DENIED, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.PENDING, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.UNREQUESTED, False)
            change_state_and_verify_email(CourseCreator.DENIED, False)

    @mock.patch(
        'cms.djangoapps.course_creators.admin.render_to_string',
        mock.Mock(side_effect=mock_render_to_string, autospec=True)
    )
    def test_mail_admin_on_pending(self):
        """
        Tests that the admin account is notified when a user is in the 'pending' state.
        """

        def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
            """ Changes user state and verifies e-mail sent to admin address only when pending. """
            mail.outbox = []
            self._change_state(state)

            # If a message is sent to the user about course creator status change, it will be the first
            # message sent. Admin message will follow.
            base_num_emails = 1 if expect_sent_to_user else 0
            if expect_sent_to_admin:
                context = {'user_name': 'test_user', 'user_email': '[email protected]'}
                self.assertEqual(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
                sent_mail = mail.outbox[base_num_emails]
                self.assertEqual(
                    mock_render_to_string('emails/course_creator_admin_subject.txt', context),
                    sent_mail.subject
                )
                self.assertEqual(
                    mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
                    sent_mail.body
                )
                self.assertEqual(self.studio_request_email, sent_mail.from_email)
                self.assertEqual([self.studio_request_email], sent_mail.to)
            else:
                self.assertEqual(base_num_emails, len(mail.outbox))

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # E-mail message should be sent to admin only when new state is PENDING, regardless of what
            # previous state was (unless previous state was already PENDING).
            # E-mail message sent to user only on transition into and out of GRANTED state.
            check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)

    def _change_state(self, state):
        """ Helper method for changing state """
        self.table_entry.state = state
        self.creator_admin.save_model(self.request, self.table_entry, None, True)

    def test_add_permission(self):
        """
        Tests that staff cannot add entries
        """
        self.assertFalse(self.creator_admin.has_add_permission(self.request))

    def test_delete_permission(self):
        """
        Tests that staff cannot delete entries
        """
        self.assertFalse(self.creator_admin.has_delete_permission(self.request))

    def test_change_permission(self):
        """
        Tests that only staff can change entries
        """
        self.assertTrue(self.creator_admin.has_change_permission(self.request))

        # A non-staff user must be refused.
        self.request.user = self.user
        self.assertFalse(self.creator_admin.has_change_permission(self.request))

    def test_rate_limit_login(self):
        with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
            post_params = {'username': self.user.username, 'password': 'wrong_password'}
            # try logging in 30 times, the default limit in the number of failed
            # login attempts in one 5 minute period before the rate gets limited
            for _ in range(30):
                response = self.client.post('/admin/login/', post_params)
                self.assertEqual(response.status_code, 200)

            response = self.client.post('/admin/login/', post_params)
            # Since we are using the default rate limit behavior, we are
            # expecting this to return a 403 error to indicate that there have
            # been too many attempts
            self.assertEqual(response.status_code, 403)
| agpl-3.0 |
vnsofthe/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
    """ lunch order meal """
    _name = 'lunch.order.order'
    _description = 'Wizard to order a meal'

    def order(self, cr, uid, ids, context=None):
        # Delegate the actual ordering to the lunch.order.line model.
        line_model = self.pool.get('lunch.order.line')
        return line_model.order(cr, uid, ids, context=context)
| agpl-3.0 |
taito/scrapy | scrapy/utils/request.py | 28 | 3459 | """
This module provides some useful functions for working with
scrapy.http.Request objects
"""
from __future__ import print_function
import hashlib
import weakref
from six.moves.urllib.parse import urlunparse
from w3lib.http import basic_auth_header
from scrapy.utils.python import to_bytes, to_native_str
from w3lib.url import canonicalize_url
from scrapy.utils.httpobj import urlparse_cached
# Per-Request memoization of fingerprints, keyed weakly so entries vanish
# when the Request object is garbage-collected. The inner dict maps the
# normalized include_headers tuple (or None) to the hex digest.
_fingerprint_cache = weakref.WeakKeyDictionary()


def request_fingerprint(request, include_headers=None):
    """
    Return the request fingerprint.

    The request fingerprint is a hash that uniquely identifies the resource the
    request points to. For example, take the following two urls:

    http://www.example.com/query?id=111&cat=222
    http://www.example.com/query?cat=222&id=111

    Even though those are two different URLs both point to the same resource
    and are equivalent (ie. they should return the same response).

    Another example are cookies used to store session ids. Suppose the
    following page is only accesible to authenticated users:

    http://www.example.com/members/offers.html

    Lot of sites use a cookie to store the session id, which adds a random
    component to the HTTP Request and thus should be ignored when calculating
    the fingerprint.

    For this reason, request headers are ignored by default when calculating
    the fingeprint. If you want to include specific headers use the
    include_headers argument, which is a list of Request headers to include.
    """
    if include_headers:
        # Normalize to a sorted tuple of lowercased byte strings so that
        # equivalent header lists produce the same cache key.
        include_headers = tuple(to_bytes(h.lower())
                                 for h in sorted(include_headers))
    cache = _fingerprint_cache.setdefault(request, {})
    if include_headers not in cache:
        # NOTE: the order of fp.update() calls defines the fingerprint
        # format; changing it would invalidate every stored fingerprint.
        fp = hashlib.sha1()
        fp.update(to_bytes(request.method))
        fp.update(to_bytes(canonicalize_url(request.url)))
        fp.update(request.body or b'')
        if include_headers:
            for hdr in include_headers:
                if hdr in request.headers:
                    fp.update(hdr)
                    for v in request.headers.getlist(hdr):
                        fp.update(v)
        cache[include_headers] = fp.hexdigest()
    return cache[include_headers]
def request_authenticate(request, username, password):
    """Authenticate the given request (in place) using the HTTP basic access
    authentication mechanism (RFC 2617) and the given username and password.
    """
    header_value = basic_auth_header(username, password)
    request.headers['Authorization'] = header_value
def request_httprepr(request):
    """Return the raw HTTP representation (as bytes) of the given request.

    This is provided only for reference since it's not the actual stream of
    bytes that will be send when performing the request (that's controlled
    by Twisted).
    """
    parsed = urlparse_cached(request)
    path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
    # Assemble request line, headers and body, then join once.
    parts = [to_bytes(request.method), b" ", to_bytes(path), b" HTTP/1.1\r\n"]
    parts.append(b"Host: " + to_bytes(parsed.hostname or b'') + b"\r\n")
    if request.headers:
        parts.append(request.headers.to_string() + b"\r\n")
    parts.append(b"\r\n")
    parts.append(request.body)
    return b"".join(parts)
def referer_str(request):
    """ Return Referer HTTP header suitable for logging. """
    referrer = request.headers.get('Referer')
    if referrer is None:
        return None
    # Decode with replacement so undecodable bytes never break logging.
    return to_native_str(referrer, errors='replace')
| bsd-3-clause |
gsmartway/odoo | openerp/addons/base/res/res_bank.py | 242 | 10554 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class Bank(osv.osv):
    """A bank (financial institution), referenced by partner bank accounts."""
    _description='Bank'
    _name = 'res.bank'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True),
        'street': fields.char('Street'),
        'street2': fields.char('Street2'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'state': fields.many2one("res.country.state", 'Fed. State',
            domain="[('country_id', '=', country)]"),
        'country': fields.many2one('res.country', 'Country'),
        'email': fields.char('Email'),
        'phone': fields.char('Phone'),
        'fax': fields.char('Fax'),
        'active': fields.boolean('Active'),
        'bic': fields.char('Bank Identifier Code', size=64,
            help="Sometimes called BIC or Swift."),
    }
    _defaults = {
        # New banks are active by default.
        'active': lambda *a: 1,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display banks as "<BIC> - <name>" when a BIC is set, else just the name."""
        result = []
        for bank in self.browse(cr, uid, ids, context):
            result.append((bank.id, (bank.bic and (bank.bic + ' - ') or '') + bank.name))
        return result
class res_partner_bank_type(osv.osv):
    """Type of a bank account (e.g. normal, IBAN), with its display layout."""
    _description='Bank Account Type'
    _name = 'res.partner.bank.type'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'code': fields.char('Code', size=64, required=True),
        'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
        # %-style template used by res.partner.bank name_get (see
        # _prepare_name_get), interpolated with the account's field values.
        'format_layout': fields.text('Format Layout', translate=True)
    }
    _defaults = {
        'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
    }
class res_partner_bank_type_fields(osv.osv):
    """Per-type UI attributes (required/readonly/size) for bank account fields."""
    _description='Bank type fields'
    _name = 'res.partner.bank.type.field'
    _order = 'name'
    _columns = {
        'name': fields.char('Field Name', required=True, translate=True),
        'bank_type_id': fields.many2one('res.partner.bank.type', 'Bank Type', required=True, ondelete='cascade'),
        'required': fields.boolean('Required'),
        'readonly': fields.boolean('Readonly'),
        'size': fields.integer('Max. Size'),
    }
class res_partner_bank(osv.osv):
    '''Bank Accounts'''
    _name = "res.partner.bank"
    _rec_name = "acc_number"
    _description = __doc__
    _order = 'sequence'

    def _bank_type_get(self, cr, uid, context=None):
        # Selection provider for the 'state' field: one (code, name) pair per
        # res.partner.bank.type record.
        bank_type_obj = self.pool.get('res.partner.bank.type')

        result = []
        type_ids = bank_type_obj.search(cr, uid, [])
        bank_types = bank_type_obj.browse(cr, uid, type_ids, context=context)
        for bank_type in bank_types:
            result.append((bank_type.code, bank_type.name))
        return result

    def _default_value(self, cursor, user, field, context=None):
        # Default a column from the partner address passed in context
        # (preferring the 'default' address, falling back to an untyped one).
        if context is None: context = {}
        if field in ('country_id', 'state_id'):
            value = False
        else:
            value = ''
        if not context.get('address'):
            return value

        for address in self.pool.get('res.partner').resolve_2many_commands(
                cursor, user, 'address', context['address'], ['type', field], context=context):
            if address.get('type') == 'default':
                return address.get(field, value)
            elif not address.get('type'):
                value = address.get(field, value)
        return value

    _columns = {
        'name': fields.char('Bank Account'), # to be removed in v6.2 ?
        'acc_number': fields.char('Account Number', size=64, required=True),
        'bank': fields.many2one('res.bank', 'Bank'),
        'bank_bic': fields.char('Bank Identifier Code', size=16),
        'bank_name': fields.char('Bank Name'),
        'owner_name': fields.char('Account Owner Name'),
        'street': fields.char('Street'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'country_id': fields.many2one('res.country', 'Country',
            change_default=True),
        'state_id': fields.many2one("res.country.state", 'Fed. State',
            change_default=True, domain="[('country_id','=',country_id)]"),
        'company_id': fields.many2one('res.company', 'Company',
            ondelete='cascade', help="Only if this bank account belong to your company"),
        'partner_id': fields.many2one('res.partner', 'Account Owner', ondelete='cascade', select=True, domain=['|',('is_company','=',True),('parent_id','=',False)]),
        # 'state' here is the bank account TYPE code, not a workflow state.
        'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
            change_default=True),
        'sequence': fields.integer('Sequence'),
        'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
    }
    _defaults = {
        'owner_name': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'name', context=context),
        'street': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'street', context=context),
        'city': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'city', context=context),
        'zip': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'zip', context=context),
        'country_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'country_id', context=context),
        'state_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'state_id', context=context),
        'name': '/'
    }

    def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
        """Extend fields_get with per-bank-type 'states' attributes, so the UI
        can mark fields readonly/required depending on the account type."""
        res = super(res_partner_bank, self).fields_get(cr, uid, allfields=allfields, context=context, write_access=write_access, attributes=attributes)
        bank_type_obj = self.pool.get('res.partner.bank.type')
        type_ids = bank_type_obj.search(cr, uid, [])
        types = bank_type_obj.browse(cr, uid, type_ids)
        for type in types:  # NOTE: 'type' shadows the builtin here
            for field in type.field_ids:
                if field.name in res:
                    res[field.name].setdefault('states', {})
                    res[field.name]['states'][type.code] = [
                        ('readonly', field.readonly),
                        ('required', field.required)]
        return res

    def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
        """ Format the name of a res.partner.bank.
            This function is designed to be inherited to add replacement fields.
            :param bank_dicts: a list of res.partner.bank dicts, as returned by the method read()
            :return: [(id, name), ...], as returned by the method name_get()
        """
        # prepare a mapping {code: format_layout} for all bank types
        bank_type_obj = self.pool.get('res.partner.bank.type')
        bank_types = bank_type_obj.browse(cr, uid, bank_type_obj.search(cr, uid, []), context=context)
        bank_code_format = dict((bt.code, bt.format_layout) for bt in bank_types)

        res = []
        for data in bank_dicts:
            name = data['acc_number']
            if data['state'] and bank_code_format.get(data['state']):
                try:
                    if not data.get('bank_name'):
                        data['bank_name'] = _('BANK')
                    # Replace falsy values with '' so %-interpolation never
                    # renders 'False'.
                    data = dict((k, v or '') for (k, v) in data.iteritems())
                    name = bank_code_format[data['state']] % data
                except Exception:
                    raise osv.except_osv(_("Formating Error"), _("Invalid Bank Account Type Name format."))
            res.append((data.get('id', False), name))
        return res

    def name_get(self, cr, uid, ids, context=None):
        if not len(ids):
            return []
        bank_dicts = self.read(cr, uid, ids, self.fields_get_keys(cr, uid, context=context), context=context)
        return self._prepare_name_get(cr, uid, bank_dicts, context=context)

    def onchange_company_id(self, cr, uid, ids, company_id, context=None):
        # When the account belongs to a company, default the owner info from
        # the company's partner and show it in report footers.
        result = {}
        if company_id:
            c = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
            if c.partner_id:
                r = self.onchange_partner_id(cr, uid, ids, c.partner_id.id, context=context)
                r['value']['partner_id'] = c.partner_id.id
                r['value']['footer'] = 1
                result = r
        return result

    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        # Copy the bank's name and BIC onto the account record.
        result = {}
        if bank_id:
            bank = self.pool.get('res.bank').browse(cr, uid, bank_id, context=context)
            result['bank_name'] = bank.name
            result['bank_bic'] = bank.bic
        return {'value': result}

    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        # Default owner name/address fields from the selected partner.
        result = {}
        if partner_id is not False:
            # be careful: partner_id may be a NewId
            part = self.pool['res.partner'].browse(cr, uid, [partner_id], context=context)
            result['owner_name'] = part.name
            result['street'] = part.street or False
            result['city'] = part.city or False
            result['zip'] = part.zip or False
            result['country_id'] = part.country_id.id
            result['state_id'] = part.state_id.id
        return {'value': result}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AMDmi3/repology | repology/parsers/parsers/rubygem.py | 1 | 1739 | # Copyright (C) 2017 Steve Wills <[email protected]>
# Copyright (C) 2018-2019 Dmitry Marakasov <[email protected]>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
from typing import Iterable
import rubymarshal.reader
from repology.packagemaker import PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.transformer import PackageTransformer
class RubyGemParser(Parser):
    """Parses a rubygems Marshal specs dump into repology packages."""

    def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
        with open(path, 'rb') as specsfile:
            for rawname, rawversion, platform in rubymarshal.reader.load(specsfile):
                name = str(rawname)
                with factory.begin(name) as pkg:
                    # Only plain ruby gems are indexed; platform-specific
                    # builds (e.g. x86-mingw32) are skipped.
                    if platform != 'ruby':
                        pkg.log('skipped, gemplat != ruby')
                        continue

                    version = str(rawversion.marshal_dump()[0])

                    pkg.set_name(name)
                    pkg.set_version(version)
                    pkg.add_homepages('https://rubygems.org/gems/' + name)

                    yield pkg
| gpl-3.0 |
kaffeel/oppia | core/domain/exp_domain_test.py | 9 | 60159 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
__author__ = 'Sean Lip'
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents: {}
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXPLORATION_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_YAML_CONTENT_WITH_GADGETS = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom: []
left:
- customization_args:
characters:
value: 2
floors:
value: 1
title:
value: The Test Gadget!
gadget_id: TestGadget
visible_in_states:
- New state
- Second state
right: []
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
Second state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: Second state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXPLORATION_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
# Minimal gadget registry used by the tests below; maps a gadget id to the
# directory its definition lives in, mirroring feconf's gadget config shape.
TEST_GADGETS = {
    'TestGadget': {
        'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget')
    }
}
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
    def test_validation(self):
        """Test validation of explorations."""
        # Start from a default exploration with its title and category
        # blanked out, then repair one validation failure at a time.
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id', '', '')
        exploration.init_state_name = ''
        exploration.states = {}
        # An empty title is rejected.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'between 1 and 50 characters'):
            exploration.validate()
        exploration.title = 'Hello #'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid character #'):
            exploration.validate()
        exploration.title = 'Title'
        # An empty category is rejected with the same length message.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'between 1 and 50 characters'):
            exploration.validate()
        exploration.category = 'Category'
        # Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
        # sentinel for an invalid state name.
        bad_state = exp_domain.State.create_default_state('/')
        exploration.states = {'/': bad_state}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid character / in a state name'):
            exploration.validate()
        new_state = exp_domain.State.create_default_state('ABC')
        new_state.update_interaction_id('TextInput')
        # The 'states' property must be a non-empty dict of states.
        exploration.states = {}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'exploration has no states'):
            exploration.validate()
        exploration.states = {'A string #': new_state}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid character # in a state name'):
            exploration.validate()
        exploration.states = {'A string _': new_state}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid character _ in a state name'):
            exploration.validate()
        exploration.states = {'ABC': new_state}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'has no initial state name'):
            exploration.validate()
        # The initial state name must refer to an existing state.
        exploration.init_state_name = 'initname'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            r'There is no state in \[\'ABC\'\] corresponding to '
            'the exploration\'s initial state name initname.'):
            exploration.validate()
        # Test whether a default outcome to a non-existing state is invalid.
        exploration.states = {exploration.init_state_name: new_state}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'destination ABC is not a valid'):
            exploration.validate()
        # Ensure an invalid destination can also be detected for answer groups.
        # Note: The state must keep its default_outcome, otherwise it will
        # trigger a validation error for non-terminal states needing to have a
        # default outcome. To validate the outcome of the answer group, this
        # default outcome must point to a valid state.
        init_state = exploration.states[exploration.init_state_name]
        default_outcome = init_state.interaction.default_outcome
        default_outcome.dest = exploration.init_state_name
        init_state.interaction.answer_groups.append(
            exp_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': exploration.init_state_name,
                    'feedback': ['Feedback'],
                    'param_changes': [],
                },
                'rule_specs': [{
                    'inputs': {
                        'x': 'Test'
                    },
                    'rule_type': 'Contains'
                }]
            })
        )
        exploration.validate()
        interaction = init_state.interaction
        answer_groups = interaction.answer_groups
        answer_group = answer_groups[0]
        answer_group.outcome.dest = 'DEF'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'destination DEF is not a valid'):
            exploration.validate()
        # Restore a valid exploration.
        exploration.states[exploration.init_state_name].update_interaction_id(
            'TextInput')
        answer_group.outcome.dest = exploration.init_state_name
        exploration.validate()
        # Validate RuleSpec.
        rule_spec = answer_group.rule_specs[0]
        rule_spec.inputs = {}
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'RuleSpec \'Contains\' is missing inputs'):
            exploration.validate()
        rule_spec.inputs = 'Inputs string'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected inputs to be a dict'):
            exploration.validate()
        rule_spec.inputs = {'x': 'Test'}
        rule_spec.rule_type = 'FakeRuleType'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Unrecognized rule type'):
            exploration.validate()
        # Rule inputs are type-checked against the rule's declared input type.
        rule_spec.inputs = {'x': 15}
        rule_spec.rule_type = 'Contains'
        with self.assertRaisesRegexp(
            Exception, 'Expected unicode string, received 15'):
            exploration.validate()
        # A '{{...}}' input must name a parameter declared on the exploration.
        rule_spec.inputs = {'x': '{{ExampleParam}}'}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'RuleSpec \'Contains\' has an input '
            'with name \'x\' which refers to an unknown parameter within '
            'the exploration: ExampleParam'):
            exploration.validate()
        # Restore a valid exploration.
        exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
            'UnicodeString')
        exploration.validate()
        # Validate Outcome.
        outcome = answer_group.outcome
        destination = exploration.init_state_name
        outcome.dest = None
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Every outcome should have a destination.'):
            exploration.validate()
        # Try setting the outcome destination to something other than a string.
        outcome.dest = 15
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected outcome dest to be a string'):
            exploration.validate()
        outcome.dest = destination
        outcome.feedback = 'Feedback'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected outcome feedback to be a list'):
            exploration.validate()
        outcome.feedback = [15]
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected outcome feedback item to be a string'):
            exploration.validate()
        outcome.feedback = ['Feedback']
        exploration.validate()
        outcome.param_changes = 'Changes'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected outcome param_changes to be a list'):
            exploration.validate()
        outcome.param_changes = []
        exploration.validate()
        # Validate InteractionInstance.
        interaction.id = 15
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected interaction id to be a string'):
            exploration.validate()
        interaction.id = 'SomeInteractionTypeThatDoesNotExist'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid interaction id'):
            exploration.validate()
        interaction.id = 'TextInput'
        exploration.validate()
        interaction.customization_args = []
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected customization args to be a dict'):
            exploration.validate()
        interaction.customization_args = {15: ''}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid customization arg name'):
            exploration.validate()
        interaction.customization_args = {'placeholder': ''}
        exploration.validate()
        interaction.answer_groups = {}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected answer groups to be a list'):
            exploration.validate()
        interaction.answer_groups = answer_groups
        interaction.id = 'EndExploration'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Terminal interactions must not have a default outcome.'):
            exploration.validate()
        interaction.id = 'TextInput'
        interaction.default_outcome = None
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Non-terminal interactions must have a default outcome.'):
            exploration.validate()
        interaction.id = 'EndExploration'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Terminal interactions must not have any answer groups.'):
            exploration.validate()
        # A terminal interaction without a default outcome or answer group is
        # valid. This resets the exploration back to a valid state.
        interaction.answer_groups = []
        exploration.validate()
        interaction.fallbacks = {}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected fallbacks to be a list'):
            exploration.validate()
        # Restore a valid exploration.
        interaction.id = 'TextInput'
        interaction.answer_groups = answer_groups
        interaction.default_outcome = default_outcome
        interaction.fallbacks = []
        exploration.validate()
        # Validate AnswerGroup.
        answer_group.rule_specs = {}
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected answer group rules to be a list'):
            exploration.validate()
        answer_group.rule_specs = []
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'There must be at least one rule for each answer group.'):
            exploration.validate()
        # Reset to a fresh default initial state before checking the
        # exploration-level properties below.
        exploration.states = {
            exploration.init_state_name: exp_domain.State.create_default_state(
                exploration.init_state_name)
        }
        exploration.states[exploration.init_state_name].update_interaction_id(
            'TextInput')
        exploration.validate()
        # Validate the language code.
        exploration.language_code = 'fake_code'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language_code'):
            exploration.validate()
        exploration.language_code = 'English'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language_code'):
            exploration.validate()
        exploration.language_code = 'en'
        exploration.validate()
        # Validate param_specs.
        exploration.param_specs = 'A string'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'param_specs to be a dict'):
            exploration.validate()
        exploration.param_specs = {
            '@': param_domain.ParamSpec.from_dict({
                'obj_type': 'UnicodeString'
            })
        }
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Only parameter names with characters'):
            exploration.validate()
        exploration.param_specs = {
            'notAParamSpec': param_domain.ParamSpec.from_dict(
                {'obj_type': 'UnicodeString'})
        }
        exploration.validate()
    def test_fallbacks_validation(self):
        """Test validation of state fallbacks."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id', 'Title', 'Category')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        exploration.validate()
        # A well-formed outcome dict reused by every fallback below.
        base_outcome = {
            'dest': exploration.init_state_name,
            'feedback': [],
            'param_changes': [],
        }
        # An unrecognized trigger type is rejected at validation time.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'FakeTriggerName',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Unknown trigger type'):
            exploration.validate()
        # An empty outcome dict fails immediately while building the
        # fallback (required key missing), before validate() is reached.
        with self.assertRaises(KeyError):
            init_state.update_interaction_fallbacks([{
                'trigger': {
                    'trigger_type': 'NthResubmission',
                    'customization_args': {
                        'num_submits': {
                            'value': 42,
                        },
                    },
                },
                'outcome': {},
            }])
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {},
            },
            'outcome': base_outcome,
        }])
        # Default values for the customization args will be added silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 3,
                }
            })
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                    'bad_key_that_will_get_stripped_silently': {
                        'value': 'unused_value',
                    }
                },
            },
            'outcome': base_outcome,
        }])
        # Unused customization arg keys will be stripped silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 42,
                }
            })
        # A fully well-formed fallback validates cleanly.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 2,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id', 'Title', 'Category')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.interaction.default_outcome = None
exploration.validate()
exploration.tags = 'this should be a list'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected \'tags\' to be a list'):
exploration.validate()
exploration.tags = [123]
with self.assertRaisesRegexp(
utils.ValidationError, 'to be a string'):
exploration.validate()
exploration.tags = ['abc', 123]
with self.assertRaisesRegexp(
utils.ValidationError, 'to be a string'):
exploration.validate()
exploration.tags = ['']
with self.assertRaisesRegexp(
utils.ValidationError, 'Tags should be non-empty'):
exploration.validate()
exploration.tags = ['123']
with self.assertRaisesRegexp(
utils.ValidationError,
'should only contain lowercase letters and spaces'):
exploration.validate()
exploration.tags = ['ABC']
with self.assertRaisesRegexp(
utils.ValidationError,
'should only contain lowercase letters and spaces'):
exploration.validate()
exploration.tags = [' a b']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace'):
exploration.validate()
exploration.tags = ['a b ']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace'):
exploration.validate()
exploration.tags = ['a b']
with self.assertRaisesRegexp(
utils.ValidationError,
'Adjacent whitespace in tags should be collapsed'):
exploration.validate()
exploration.tags = ['abc', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, 'Some tags duplicate each other'):
exploration.validate()
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
    def test_exploration_skin_and_gadget_validation(self):
        """Test that Explorations including gadgets validate properly."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        # A gadget id that is not registered fails its own validation.
        invalid_gadget_instance = exp_domain.GadgetInstance('bad_ID', [], {})
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Unknown gadget with ID bad_ID is not in the registry.'):
            invalid_gadget_instance.validate()
        # Register 'TestGadget' for the remainder of the test.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'left'][0]
            # Force a GadgetInstance to require certain state names.
            gadget_instance.visible_in_states.extend(['DEF', 'GHI'])
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'Exploration missing required states: DEF, GHI'):
                exploration.validate()
            # Adding one of the two missing states changes the error to the
            # singular form for the remaining one.
            def_state = exp_domain.State.create_default_state('DEF')
            def_state.update_interaction_id('TextInput')
            exploration.states['DEF'] = def_state
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'Exploration missing required state: GHI'):
                exploration.validate()
            ghi_state = exp_domain.State.create_default_state('GHI')
            ghi_state.update_interaction_id('TextInput')
            exploration.states['GHI'] = ghi_state
            exploration.validate()
            # Listing the same state twice in visible_in_states is invalid.
            gadget_instance.visible_in_states.extend(['GHI'])
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'TestGadget specifies visibility repeatedly for state: '
                'GHI'):
                exploration.validate()
            # Remove duplicate state.
            gadget_instance.visible_in_states.pop()
            # Adding a panel that doesn't exist in the skin.
            exploration.skin_instance.panel_contents_dict[
                'non_existent_panel'] = []
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'non_existent_panel panel not found in skin '
                'conversation_v1'):
                exploration.validate()
def test_exploration_get_gadget_ids(self):
"""Test that Exploration.get_gadget_ids returns apt results."""
exploration_without_gadgets = exp_domain.Exploration.from_yaml(
'An Exploration ID', 'A title', 'Category', SAMPLE_YAML_CONTENT)
self.assertEqual(exploration_without_gadgets.get_gadget_ids(), [])
exploration_with_gadgets = exp_domain.Exploration.from_yaml(
'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
self.assertEqual(
exploration_with_gadgets.get_gadget_ids(),
['TestGadget']
)
another_gadget = exp_domain.GadgetInstance('AnotherGadget', [], {})
exploration_with_gadgets.skin_instance.panel_contents_dict[
'right'].append(another_gadget)
self.assertEqual(
exploration_with_gadgets.get_gadget_ids(),
['AnotherGadget', 'TestGadget']
)
    def test_objective_validation(self):
        """Test that objectives are validated only in 'strict' mode."""
        # Persist an exploration whose objective is deliberately empty.
        self.save_new_valid_exploration(
            'exp_id', '[email protected]', title='Title', category='Category',
            objective='', end_state_name='End')
        exploration = exp_services.get_exploration_by_id('exp_id')
        # Non-strict validation tolerates the missing objective...
        exploration.validate()
        # ...but strict validation rejects it.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'objective must be specified'):
            exploration.validate(strict=True)
        exploration.objective = 'An objective'
        exploration.validate(strict=True)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration(
'0', 'title', 'category')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration(
'a', 'title', 'category')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration(
'abcd', 'title', 'category')
self.assertEqual(notdemo2.is_demo, False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration(
'0', 'title', 'category')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.create_exploration_from_dict(
demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration(
'0', 'title', 'category')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
    """Test export of states."""
    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'A different exploration_id', 'A title', 'A category')
        exploration.add_states(['New state'])
        state_dict = exploration.states['New state'].to_dict()
        # A newly-added state has empty content, no interaction id, and a
        # default outcome that loops back to the state itself.
        expected_dict = {
            'content': [{
                'type': 'text',
                'value': u''
            }],
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': [],
                    'param_changes': [],
                },
                'fallbacks': [],
                'id': None,
            },
            'param_changes': [],
        }
        self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""
    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods."""
        EXP_ID = 'An exploration_id'
        exploration = exp_domain.Exploration.create_default_exploration(
            EXP_ID, 'A title', 'A category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)
        # Attach a fallback so the serialized output matches the module-level
        # SAMPLE_YAML_CONTENT fixture exactly.
        exploration.states['New state'].update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {
                'dest': 'New state',
                'feedback': [],
                'param_changes': [],
            },
        }])
        exploration.validate()
        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
        # The emitted YAML must round-trip through from_yaml()/to_yaml().
        exploration2 = exp_domain.Exploration.from_yaml(
            'exp2', 'Title', 'Category', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)
        # Malformed YAML inputs are rejected.
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp3', 'Title', 'Category', 'No_initial_state_name')
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Title', 'Category',
                'Invalid\ninit_state_name:\nMore stuff')
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Title', 'Category', 'State1:\n(\nInvalid yaml')
    def test_yaml_import_and_export_without_gadgets(self):
        """Test from_yaml() and to_yaml() methods without gadgets."""
        EXP_ID = 'An exploration_id'
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            EXP_ID, 'A title', 'Category', SAMPLE_YAML_CONTENT)
        yaml_content = exploration_without_gadgets.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
    def test_yaml_import_and_export_with_gadgets(self):
        """Test from_yaml() and to_yaml() methods including gadgets."""
        EXP_ID = 'An exploration_id'
        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            EXP_ID, 'A title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        generated_yaml = exploration_with_gadgets.to_yaml()
        # Compare as parsed dicts so key ordering in the YAML text does not
        # affect the assertion.
        generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
        sample_yaml_as_dict = utils.dict_from_yaml(
            SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = (
"""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = (
"""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 6
tags: []
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V9
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""
    def test_convert_exploration_to_player_dict(self):
        """Test that to_player_dict() exposes exactly the player-facing
        fields of an exploration.
        """
        EXP_TITLE = 'A title'
        SECOND_STATE_NAME = 'first state'
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', EXP_TITLE, 'A category')
        exploration.add_states([SECOND_STATE_NAME])
        # Builds the player-dict form of a default state whose default
        # outcome points at dest_name.
        def _get_default_state_dict(content_str, dest_name):
            return {
                'content': [{
                    'type': 'text',
                    'value': content_str,
                }],
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': [],
                        'param_changes': [],
                    },
                    'fallbacks': [],
                    'id': None,
                },
                'param_changes': [],
            }
        self.assertEqual(exploration.to_player_dict(), {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': EXP_TITLE,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                SECOND_STATE_NAME: _get_default_state_dict(
                    '', SECOND_STATE_NAME),
            },
            'param_changes': [],
            'param_specs': {},
            'skin_customizations': feconf.DEFAULT_SKIN_CUSTOMIZATIONS,
        })
class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on states."""

    def test_delete_state(self):
        """Test deletion of states."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', 'A title', 'A category')
        exploration.add_states(['first state'])

        # The initial state must never be deletable.
        with self.assertRaisesRegexp(
                ValueError, 'Cannot delete initial state'):
            exploration.delete_state(exploration.init_state_name)

        exploration.add_states(['second state'])
        exploration.delete_state('second state')

        # Deleting a nonexistent state is an error.
        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exploration.delete_state('fake state')

    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', 'A title', 'A category')
        with self.assertRaises(KeyError):
            exploration.states['invalid_state_name']

        self.assertEqual(len(exploration.states), 1)

        # Renaming the initial state also updates init_state_name.
        default_state_name = exploration.init_state_name
        exploration.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.init_state_name, 'Renamed state')

        # Add a new state.
        exploration.add_states(['State 2'])
        self.assertEqual(len(exploration.states), 2)

        # It is OK to rename a state to the same name.
        exploration.rename_state('State 2', 'State 2')

        # But it is not OK to add or rename a state using a name that already
        # exists.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'Renamed state')

        # And it is OK to rename a state to 'END' (old terminal pseudostate). It
        # is tested throughout this test because a lot of old behavior used to
        # be specific to states named 'END'. These tests validate that is no
        # longer the situation.
        exploration.rename_state('State 2', 'END')

        # Should successfully be able to name it back.
        exploration.rename_state('END', 'State 2')

        # The exploration now has exactly two states.
        self.assertNotIn(default_state_name, exploration.states)
        self.assertIn('Renamed state', exploration.states)
        self.assertIn('State 2', exploration.states)

        # Can successfully add 'END' state
        exploration.add_states(['END'])

        # Should fail to rename like any other state
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'END')

        # Ensure the other states are connected to END
        exploration.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exploration.states['State 2'].interaction.default_outcome.dest = 'END'

        # Ensure the other states have interactions
        exploration.states['Renamed state'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')

        # Other miscellaneous requirements for validation
        exploration.objective = 'Objective'

        # The exploration should NOT be terminable even though it has a state
        # called 'END' and everything else is connected to it.
        with self.assertRaises(Exception):
            exploration.validate(strict=True)

        # Renaming the node to something other than 'END' and giving it an
        # EndExploration is enough to validate it, though it cannot have a
        # default outcome or answer groups.
        exploration.rename_state('END', 'AnotherEnd')
        another_end_state = exploration.states['AnotherEnd']
        another_end_state.update_interaction_id('EndExploration')
        another_end_state.interaction.default_outcome = None
        exploration.validate(strict=True)

        # Name it back for final tests
        exploration.rename_state('AnotherEnd', 'END')

        # Should be able to successfully delete it
        exploration.delete_state('END')
        self.assertNotIn('END', exploration.states)
class SkinInstanceUnitTests(test_utils.GenericTestBase):
    """Test methods for SkinInstance."""

    # Canonical dict form of a skin with a single TestGadget in the left
    # panel; used both as from_dict input and as expected to_dict output.
    _SAMPLE_SKIN_INSTANCE_DICT = {
        'skin_id': 'conversation_v1',
        'skin_customizations': {
            'panels_contents': {
                'bottom': [],
                'left': [
                    {
                        'customization_args': {
                            'characters': {'value': 2},
                            'floors': {'value': 1},
                            'title': {'value': 'The Test Gadget!'}},
                        'gadget_id': 'TestGadget',
                        'visible_in_states': ['New state', 'Second state']
                    }
                ],
                'right': []
            }
        }
    }

    def test_get_state_names_required_by_gadgets(self):
        """Test accurate computation of state_names_required_by_gadgets."""
        skin_instance = exp_domain.SkinInstance(
            'conversation_v1',
            self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations'])
        self.assertEqual(
            skin_instance.get_state_names_required_by_gadgets(),
            ['New state', 'Second state'])

    def test_conversion_of_skin_to_and_from_dict(self):
        """Tests conversion of SkinInstance to and from dict representations."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        skin_instance = exploration.skin_instance

        # Round trip: to_dict should reproduce the canonical sample dict.
        skin_instance_as_dict = skin_instance.to_dict()

        self.assertEqual(
            skin_instance_as_dict,
            self._SAMPLE_SKIN_INSTANCE_DICT)

        skin_instance_as_instance = exp_domain.SkinInstance.from_dict(
            skin_instance_as_dict)

        self.assertEqual(skin_instance_as_instance.skin_id, 'conversation_v1')
        self.assertEqual(
            sorted(skin_instance_as_instance.panel_contents_dict.keys()),
            ['bottom', 'left', 'right'])
class GadgetInstanceUnitTests(test_utils.GenericTestBase):
    """Tests methods instantiating and validating GadgetInstances."""

    def test_gadget_instantiation(self):
        """Test instantiation of GadgetInstances."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)

        # Assert the left panel has 1 GadgetInstance; bottom and right have 0.
        # (The original comment claimed bottom had 1, contradicting the
        # assertion below.)
        self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
            'left']), 1)
        self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
            'bottom']), 0)
        self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
            'right']), 0)

    def test_gadget_instance_properties(self):
        """Test accurate representation of gadget properties."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        # The swap makes TestGadget available in the gadget registry while
        # its properties are accessed.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['left'][0]

            self.assertEqual(test_gadget_instance.height, 50)
            self.assertEqual(test_gadget_instance.width, 60)
            self.assertEqual(
                test_gadget_instance.customization_args['title']['value'],
                'The Test Gadget!')
            self.assertIn('New state', test_gadget_instance.visible_in_states)

    def test_gadget_instance_validation(self):
        """Test validation of GadgetInstance."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['left'][0]

            # Validation against sample YAML should pass without error.
            exploration.validate()

            # Assert size exceeded error triggers when a gadget's size exceeds
            # a panel's capacity.
            with self.swap(
                    test_gadget_instance.gadget,
                    '_PIXEL_WIDTH_PER_CHARACTER',
                    2300):
                with self.assertRaisesRegexp(
                        utils.ValidationError,
                        'Size exceeded: left panel width of 4600 exceeds limit of '
                        '100'):
                    exploration.validate()

            # Assert internal validation against CustomizationArgSpecs.
            test_gadget_instance.customization_args['floors']['value'] = 5
            with self.assertRaisesRegexp(
                    utils.ValidationError,
                    'TestGadgets are limited to 3 floors, found 5.'):
                test_gadget_instance.validate()
            test_gadget_instance.customization_args['floors']['value'] = 1

            # Assert that too many gadgets in a panel raise a ValidationError.
            panel_contents_dict['left'].append(test_gadget_instance)
            with self.assertRaisesRegexp(
                    utils.ValidationError,
                    '\'left\' panel expected at most 1 gadget, but 2 gadgets are '
                    'visible in state \'New state\'.'):
                exploration.validate()

            # Assert that an error is raised when a gadget is not visible in any
            # states.
            test_gadget_instance.visible_in_states = []
            with self.assertRaisesRegexp(
                    utils.ValidationError,
                    'TestGadget gadget not visible in any states.'):
                test_gadget_instance.validate()

    def test_conversion_of_gadget_instance_to_and_from_dict(self):
        """Test conversion of GadgetInstance to and from dict. """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', 'Title', 'Category', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict
        test_gadget_instance = panel_contents_dict['left'][0]

        # Round trip: instance -> dict -> instance.
        test_gadget_as_dict = test_gadget_instance.to_dict()

        self.assertEqual(
            test_gadget_as_dict,
            {
                'gadget_id': 'TestGadget',
                'visible_in_states': ['New state', 'Second state'],
                'customization_args': {
                    'title': {
                        'value': 'The Test Gadget!'
                    },
                    'characters': {
                        'value': 2
                    },
                    'floors': {
                        'value': 1
                    }
                }
            }
        )

        test_gadget_as_instance = exp_domain.GadgetInstance.from_dict(
            test_gadget_as_dict)

        self.assertEqual(test_gadget_as_instance.width, 60)
        self.assertEqual(test_gadget_as_instance.height, 50)
        self.assertEqual(
            test_gadget_as_instance.customization_args['title']['value'],
            'The Test Gadget!'
        )
| apache-2.0 |
samfpetersen/gnuradio | gr-qtgui/examples/pyqt_waterfall_c.py | 38 | 6535 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
    """Top-level window that lays out the waterfall display next to the
    control panel, left to right."""

    def __init__(self, display, control):
        QtGui.QWidget.__init__(self, None)

        self.setWindowTitle('PyQt Test GUI')

        self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
        # Stretch factor 1 lets the display take the extra horizontal space.
        self.boxlayout.addWidget(display, 1)
        self.boxlayout.addWidget(control)

        self.resize(800, 500)
class control_box(QtGui.QWidget):
    """Control panel exposing frequency/amplitude line edits for two signal
    sources plus a Close button. Signal sources are bound after construction
    via attach_signal1/attach_signal2."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Control Panel')
        self.setToolTip('Control the signals')
        QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))

        self.layout = QtGui.QFormLayout(self)

        # Control the first signal
        self.freq1Edit = QtGui.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
        self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq1EditText)

        self.amp1Edit = QtGui.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
        self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp1EditText)

        # Control the second signal
        self.freq2Edit = QtGui.QLineEdit(self)
        self.freq2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
        self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq2EditText)

        self.amp2Edit = QtGui.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
        self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp2EditText)

        self.quit = QtGui.QPushButton('Close', self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.connect(self.quit, QtCore.SIGNAL('clicked()'),
                     QtGui.qApp, QtCore.SLOT('quit()'))

    def attach_signal1(self, signal):
        # Bind the first source block and mirror its current settings in
        # the line edits.
        self.signal1 = signal
        self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
        self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))

    def attach_signal2(self, signal):
        # Same as attach_signal1, for the second source block.
        self.signal2 = signal
        self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
        self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))

    def freq1EditText(self):
        # Slot: push the edited frequency to source 1; non-numeric input is
        # reported and otherwise ignored.
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp1EditText(self):
        # Slot: push the edited amplitude to source 1.
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def freq2EditText(self):
        # Slot: push the edited frequency to source 2.
        try:
            newfreq = float(self.freq2Edit.text())
            self.signal2.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp2EditText(self):
        # Slot: push the edited amplitude to source 2.
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"
class my_top_block(gr.top_block):
    """GNU Radio flow graph: two complex sine sources are summed, run
    through a channel model and a throttle, then fed to a 2-input waterfall
    sink -- input 0 raw, input 1 after a complex band-pass filter."""

    def __init__(self):
        gr.top_block.__init__(self)

        Rs = 8000      # sample rate
        f1 = 100       # frequency of source 1
        f2 = 2000      # frequency of source 2

        npts = 2048    # FFT size of the waterfall sink

        # Band-pass taps covering 1500-2500 Hz, i.e. around the f2 tone.
        taps = filter.firdes.complex_band_pass_2(1, Rs, 1500, 2500, 100, 60)

        # The Qt application must exist before any qtgui widgets are made.
        self.qapp = QtGui.QApplication(sys.argv)
        ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
        sstext = ss.read()
        ss.close()
        self.qapp.setStyleSheet(sstext)

        src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
        src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
        src = blocks.add_cc()
        channel = channels.channel_model(0.01)
        # Throttle keeps the graph from consuming CPU at full speed since
        # there is no rate-limiting hardware sink.
        thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
        filt = filter.fft_filter_ccc(1, taps)
        self.snk1 = qtgui.waterfall_sink_c(npts, filter.firdes.WIN_BLACKMAN_hARRIS,
                                           0, Rs,
                                           "Complex Waterfall Example", 2)

        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, channel, thr, (self.snk1, 0))
        self.connect(thr, filt, (self.snk1, 1))

        self.ctrl_win = control_box()
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        #pyWin.show()
        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
if __name__ == "__main__":
    # Build the flow graph, start streaming, hand control to the Qt event
    # loop, and stop the flow graph once the GUI exits.
    tb = my_top_block()  # fixed: removed stray C-style trailing semicolon
    tb.start()
    tb.qapp.exec_()
    tb.stop()
| gpl-3.0 |
stelfrich/openmicroscopy | components/tools/OmeroPy/test/unit/clitest/mocks.py | 20 | 2812 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mock context objects for cli tests.
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import subprocess
import logging
from omero.cli import CLI, NonZeroReturnCode
from omero_ext import mox
from omero_version import ice_compatibility
LOG = logging.getLogger("climocks")
class MockCLI(CLI):
    """CLI test double: replays scripted input values, Popen mocks and
    call() return codes, and records out()/err() output so tests can
    assert on it afterwards."""

    def __init__(self, *args, **kwargs):
        self.__expect = []  # (key, value) pairs served by input()
        self.__output = []  # lines captured from out()
        self.__error = []   # lines captured from err()
        self.__popen = []   # mock subprocess.Popen objects served by popen()
        self.__call = []    # scripted return codes consumed by call()
        self.mox = mox.Mox()
        CLI.__init__(self, *args, **kwargs)

    #
    # Overrides
    #

    def input(self, *args, **kwargs):
        # Serve the value scripted for this prompt key, consuming the
        # expectation so it is used at most once.
        key = args[0]
        for I, T in enumerate(self.__expect):
            K, V = T
            if key == K:
                self.__expect.pop(I)
                return V
        # Not found
        msg = """Couldn't find key: "%s". Options: %s""" % \
            (key, [x[0] for x in self.__expect])
        print msg
        raise Exception(msg)

    def out(self, *args, **kwargs):
        self.__output.append(args[0])

    def err(self, *args):
        self.__error.append(args[0])

    def call(self, args):
        # Pop the next scripted return code; non-zero simulates a failed
        # subcommand.
        LOG.debug("call:%s" % args)
        rv = self.__call.pop(0)
        if rv != 0:
            raise NonZeroReturnCode(rv)
        return 0

    def conn(self, *args, **kwargs):
        # Tests must never open a real server connection.
        assert False

    def popen(self, *args, **kwargs):
        LOG.debug("popen:%s {%s}" % (args, kwargs))
        return self.__popen.pop(0)

    #
    # Test methods
    #

    def expect(self, key, value):
        self.__expect.append((key, value))

    def assertStdout(self, args):
        # NOTE: set comparison ignores ordering and duplicate lines; the
        # buffer is cleared even when the assertion fails.
        try:
            assert set(args) == set(self.__output)
        finally:
            self.__output = []

    def assertStderr(self, args):
        # Same semantics as assertStdout, for err() output.
        try:
            assert set(args) == set(self.__error)
        finally:
            self.__error = []

    def addCall(self, rv):
        self.__call.append(rv)

    def assertCalled(self):
        # All scripted call() return codes must have been consumed.
        assert 0 == len(self.__call)

    def createPopen(self):
        popen = self.mox.CreateMock(subprocess.Popen)
        self.__popen.append(popen)
        return popen

    def checksIceVersion(self):
        # Script a Popen whose communicate() reports the compatible Ice
        # version on stderr-position of the (stdout, stderr) pair.
        popen = self.createPopen()
        popen.communicate().AndReturn([None, ice_compatibility])
        self.replay(popen)

    def checksStatus(self, rcode):
        # Script a Popen whose wait() returns the given status code.
        popen = self.createPopen()
        popen.wait().AndReturn(rcode)
        self.replay(popen)

    def assertPopened(self):
        # All scripted Popen mocks must have been consumed.
        assert 0 == len(self.__popen)

    def replay(self, mock):
        mox.Replay(mock)

    def teardown_method(self, method):
        # Verify all mox expectations; stubs are unset even if verification
        # fails.
        try:
            self.mox.VerifyAll()
        finally:
            self.mox.UnsetStubs()
| gpl-2.0 |
CatsAndDogsbvba/odoo | addons/google_account/google_account.py | 178 | 8958 | # -*- coding: utf-8 -*-
import openerp
from openerp.http import request
from openerp.osv import osv
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import werkzeug.urls
import urllib2
import simplejson
import logging
_logger = logging.getLogger(__name__)
TIMEOUT = 20
class google_service(osv.osv_memory):
_name = 'google.service'
def generate_refresh_token(self, cr, uid, service, authorization_code, context=None):
ir_config = self.pool['ir.config_parameter']
client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % service)
client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_secret' % service)
redirect_uri = ir_config.get_param(cr, SUPERUSER_ID, 'google_redirect_uri')
#Get the Refresh Token From Google And store it in ir.config_parameter
headers = {"Content-type": "application/x-www-form-urlencoded"}
data = dict(code=authorization_code, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri, grant_type="authorization_code")
data = werkzeug.url_encode(data)
try:
req = urllib2.Request("https://accounts.google.com/o/oauth2/token", data, headers)
content = urllib2.urlopen(req, timeout=TIMEOUT).read()
except urllib2.HTTPError:
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired"
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
content = simplejson.loads(content)
return content.get('refresh_token')
def _get_google_token_uri(self, cr, uid, service, scope, context=None):
ir_config = self.pool['ir.config_parameter']
params = {
'scope': scope,
'redirect_uri': ir_config.get_param(cr, SUPERUSER_ID, 'google_redirect_uri'),
'client_id': ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % service),
'response_type': 'code',
'client_id': ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % service),
}
uri = 'https://accounts.google.com/o/oauth2/auth?%s' % werkzeug.url_encode(params)
return uri
# If no scope is passed, we use service by default to get a default scope
def _get_authorize_uri(self, cr, uid, from_url, service, scope=False, context=None):
""" This method return the url needed to allow this instance of OpenErp to access to the scope of gmail specified as parameters """
state_obj = dict(d=cr.dbname, s=service, f=from_url)
base_url = self.get_base_url(cr, uid, context)
client_id = self.get_client_id(cr, uid, service, context)
params = {
'response_type': 'code',
'client_id': client_id,
'state': simplejson.dumps(state_obj),
'scope': scope or 'https://www.googleapis.com/auth/%s' % (service,),
'redirect_uri': base_url + '/google_account/authentication',
'approval_prompt': 'force',
'access_type': 'offline'
}
uri = self.get_uri_oauth(a='auth') + "?%s" % werkzeug.url_encode(params)
return uri
def _get_google_token_json(self, cr, uid, authorize_code, service, context=None):
res = False
base_url = self.get_base_url(cr, uid, context)
client_id = self.get_client_id(cr, uid, service, context)
client_secret = self.get_client_secret(cr, uid, service, context)
params = {
'code': authorize_code,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'authorization_code',
'redirect_uri': base_url + '/google_account/authentication'
}
headers = {"content-type": "application/x-www-form-urlencoded"}
try:
uri = self.get_uri_oauth(a='token')
data = werkzeug.url_encode(params)
st, res, ask_time = self._do_request(cr, uid, uri, params=data, headers=headers, type='POST', preuri='', context=context)
except urllib2.HTTPError:
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid"
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
return res
def _refresh_google_token_json(self, cr, uid, refresh_token, service, context=None): # exchange_AUTHORIZATION vs Token (service = calendar)
res = False
client_id = self.get_client_id(cr, uid, service, context)
client_secret = self.get_client_secret(cr, uid, service, context)
params = {
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
}
headers = {"content-type": "application/x-www-form-urlencoded"}
try:
uri = self.get_uri_oauth(a='token')
data = werkzeug.url_encode(params)
st, res, ask_time = self._do_request(cr, uid, uri, params=data, headers=headers, type='POST', preuri='', context=context)
except urllib2.HTTPError, e:
if e.code == 400: # invalid grant
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, uid, [uid], {'google_%s_rtoken' % service: False}, context=context)
error_key = simplejson.loads(e.read()).get("error", "nc")
_logger.exception("Bad google request : %s !" % error_key)
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
return res
def _do_request(self, cr, uid, uri, params={}, headers={}, type='POST', preuri="https://www.googleapis.com", context=None):
if context is None:
context = {}
""" Return a tuple ('HTTP_CODE', 'HTTP_RESPONSE') """
_logger.debug("Uri: %s - Type : %s - Headers: %s - Params : %s !" % (uri, type, headers, werkzeug.url_encode(params) if type == 'GET' else params))
status = 418
response = ""
ask_time = datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
try:
if type.upper() == 'GET' or type.upper() == 'DELETE':
data = werkzeug.url_encode(params)
req = urllib2.Request(preuri + uri + "?" + data)
elif type.upper() == 'POST' or type.upper() == 'PATCH' or type.upper() == 'PUT':
req = urllib2.Request(preuri + uri, params, headers)
else:
raise ('Method not supported [%s] not in [GET, POST, PUT, PATCH or DELETE]!' % (type))
req.get_method = lambda: type.upper()
request = urllib2.urlopen(req, timeout=TIMEOUT)
status = request.getcode()
if int(status) in (204, 404): # Page not found, no response
response = False
else:
content = request.read()
response = simplejson.loads(content)
try:
ask_time = datetime.strptime(request.headers.get('date'), "%a, %d %b %Y %H:%M:%S %Z")
except:
pass
except urllib2.HTTPError, e:
if e.code in (204, 404):
status = e.code
response = ""
else:
_logger.exception("Bad google request : %s !" % e.read())
if e.code in (400, 401, 410):
raise e
raise self.pool.get('res.config.settings').get_config_warning(cr, _("Something went wrong with your request to google"), context=context)
return (status, response, ask_time)
def get_base_url(self, cr, uid, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://www.openerp.com?NoBaseUrl', context=context)
def get_client_id(self, cr, uid, service, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % (service,), default=False, context=context)
def get_client_secret(self, cr, uid, service, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'google_%s_client_secret' % (service,), default=False, context=context)
def get_uri_oauth(self, a=''): # a = optional action
return "https://accounts.google.com/o/oauth2/%s" % (a,)
def get_uri_api(self):
return 'https://www.googleapis.com'
| agpl-3.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim/zone/vacant_industrial_job_space.py | 2 | 2200 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from urbansim.gridcell.vacant_industrial_job_space import vacant_industrial_job_space as gc_vacant_industrial_job_space
class vacant_industrial_job_space(Variable):
    """ The industrial_sqft/industrial_sqft_per_job - number_of_industrial_jobs. """
    # Names of the zone-level attributes this variable depends on.
    number_of_industrial_jobs = "number_of_industrial_jobs"
    industrial_sqft = "industrial_sqft"
    sqft = "industrial_sqft_per_job"

    def dependencies(self):
        # Zone totals are aggregated from gridcells: total sqft uses sum,
        # while sqft-per-job uses the mean across gridcells.
        return [my_attribute_label(self.number_of_industrial_jobs),
                "%s = zone.aggregate(gridcell.industrial_sqft, function=sum)" % self.industrial_sqft,
                "%s = zone.aggregate(gridcell.industrial_sqft_per_job, function=mean)" % self.sqft]

    def compute(self, dataset_pool):
        # Delegate to the gridcell implementation, re-bound to the zone
        # dataset so the same formula is evaluated on zone attributes.
        vacant_job_space = gc_vacant_industrial_job_space()
        vacant_job_space.set_dataset(self.get_dataset())
        return vacant_job_space.compute(dataset_pool)
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
    variable_name = "urbansim.zone.vacant_industrial_job_space"

    def test_my_inputs(self):
        # Expected: industrial_sqft / industrial_sqft_per_job -
        # number_of_industrial_jobs per zone (the delegated gridcell
        # implementation handles the sqft_per_job == 0 case, yielding 0).
        number_of_industrial_jobs = array([12, 0, 39, 0])
        industrial_sqft = array([1200, 16, 3900, 15])
        industrial_sqft_per_job = array([20, 3, 30, 0])

        values = VariableTestToolbox().compute_variable(self.variable_name,
            {"zone":{
                "number_of_industrial_jobs":number_of_industrial_jobs,
                "industrial_sqft":industrial_sqft,
                "industrial_sqft_per_job":industrial_sqft_per_job}},
            dataset = "zone")
        should_be = array([48.0, 5.0, 91.0, 0.0])

        self.assertEqual(ma.allequal(values, should_be), True, msg = "Error in " + self.variable_name)
if __name__=='__main__':
    # Run this module's unit tests when executed directly.
    opus_unittest.main()
fast90/youtube-dl | youtube_dl/extractor/democracynow.py | 15 | 3015 | # coding: utf-8
from __future__ import unicode_literals
import re
import os.path
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
url_basename,
remove_start,
)
class DemocracynowIE(InfoExtractor):
    """Extractor for democracynow.org shows and story pages."""
    # Fix: escape the dot in the domain -- the original unescaped '.'
    # matched any character (e.g. 'democracynowXorg').
    _VALID_URL = r'https?://(?:www\.)?democracynow\.org/(?P<id>[^\?]*)'
    IE_NAME = 'democracynow'
    _TESTS = [{
        'url': 'http://www.democracynow.org/shows/2015/7/3',
        'md5': '3757c182d3d84da68f5c8f506c18c196',
        'info_dict': {
            'id': '2015-0703-001',
            'ext': 'mp4',
            'title': 'Daily Show',
        },
    }, {
        'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
        'info_dict': {
            'id': '2015-0703-001',
            'ext': 'mp4',
            'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
            'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The page embeds its metadata as a JSON blob inside a
        # <script type="text/json"> tag.
        json_data = self._parse_json(self._search_regex(
            r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
            display_id)

        title = json_data['title']
        formats = []

        video_id = None

        for key in ('file', 'audio', 'video', 'high_res_video'):
            media_url = json_data.get(key, '')
            if not media_url:
                continue
            # Strip any query string and resolve relative media URLs.
            media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
            # Derive the id from the first media file name, dropping the
            # 'dn' prefix (e.g. 'dn2015-0703-001.mp4' -> '2015-0703-001').
            video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
            formats.append({
                'url': media_url,
                'vcodec': 'none' if key == 'audio' else None,
            })

        self._sort_formats(formats)

        default_lang = 'en'
        subtitles = {}

        def add_subtitle_item(lang, info_dict):
            if lang not in subtitles:
                subtitles[lang] = []
            subtitles[lang].append(info_dict)

        # chapter_file are not subtitles
        if 'caption_file' in json_data:
            add_subtitle_item(default_lang, {
                'url': compat_urlparse.urljoin(url, json_data['caption_file']),
            })
        for subtitle_item in json_data.get('captions', []):
            lang = subtitle_item.get('language', '').lower() or default_lang
            add_subtitle_item(lang, {
                'url': compat_urlparse.urljoin(url, subtitle_item['url']),
            })

        description = self._og_search_description(webpage, default=None)

        return {
            'id': video_id or display_id,
            'title': title,
            'description': description,
            'thumbnail': json_data.get('image'),
            'subtitles': subtitles,
            'formats': formats,
        }
| unlicense |
imincik/pkg-qgis-1.8 | src/plugins/plugin_builder.py | 13 | 7100 | #!/usr/bin/python
#***************************************************************************
# plugin_builder.py
# A script to automate creation of a new QGIS plugin using the plugin_template
# --------------------------------------
# Date : Sun Sep 16 12:11:04 AKDT 2007
# Copyright : (C) Copyright 2007 Martin Dobias
# Email :
# Original authors of Perl version: Gary Sherman and Tim Sutton
#***************************************************************************
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License as published by *
#* the Free Software Foundation; either version 2 of the License, or *
#* (at your option) any later version. *
#* *
#***************************************************************************/
import os, sys, shutil, re
def template_file(file):
    """Return the path of *file* inside the bundled plugin_template directory."""
    template_dir = 'plugin_template'
    return os.path.join(template_dir, file)
def plugin_file(pluginDir, file):
    """Return the path of *file* inside the (new) plugin directory *pluginDir*."""
    path = os.path.join(pluginDir, file)
    return path
# make sure we are in the plugins directory otherwise the changes this script will make will
# wreak havoc....
# Guard: everything below creates directories and rewrites CMakeLists.txt
# relative to the current working directory, so refuse to run anywhere other
# than <qgis dir>/src/plugins/.
myDir = os.getcwd()
print "Checking that we are in the <qgis dir>/src/plugins/ directory....",
# Build the expected tail with os.path.join so the comparison also works with
# the platform's native path separator.
pluginsDirectory = os.path.join('src','plugins')
if myDir[-len(pluginsDirectory):] == pluginsDirectory:
    print "yes"
else:
    print "no"
    print myDir
    print "Please relocate to the plugins directory before attempting to run this script."
    sys.exit(1)
# get the needed information from the user
# The script is interactive: each raw_input() below blocks for one answer.
# (Python 2 style: a trailing comma on a print statement suppresses the
# newline so the prompt and the cursor stay on the same line.)
# 1) Directory name for the new plugin under src/plugins/.
print "Enter the directory name under qgis/src/plugins/ where your new plugin will be created."
print "We suggest using a lowercase underscore separated name e.g. clever_plugin"
print "Directory for the new plugin:",
pluginDir = raw_input()
print
# 2) Mixed-case base name used for the library and generated class names.
print "Enter the name that will be used when creating the plugin library."
print "The name should be entered as a mixed case name with no spaces. e.g. CleverTool"
print "The plugin name will be used in the following ways:"
print "1) it will be 'lower cased' and used as the root of the generated lib name"
print "    e.g. libqgis_plugin_clevertool"
print "2) in its upper cased form it will be used as the basis for class names, in particular"
print "    CleverToolGuiBase <- The base class for the plugins configuration dialog / gui generated by uic"
print "    CleverToolGui <- The concrete class for the plugins configuration dialog"
print "3) CleverTool <- The class that includes the plugin loader instructions and"
print "    and calls to your custom application logic"
print "4) clevertool.h, clevertool.cpp etc. <- the filenames used to hold the above derived classes"
print "Plugin name:",
pluginName = raw_input()
# Lower-cased form drives the generated file and library names.
pluginLCaseName = pluginName.lower()
print
# 3) One-line description shown for the plugin.
print "Enter a short description (typically one line)"
print "e.g. The clever plugin does clever stuff in QGIS"
print "Plugin description:",
pluginDescription = raw_input()
print
# 4) Category (e.g. Vector) that tells users where to find the plugin.
print "Enter a plugin category. Category will help users"
print "to understand where to find plugin. E.g. if plugin"
print "will be available from Vector menu category is Vector"
print "Plugin category:",
pluginCategory = raw_input()
print
# 5) Name of the application menu created for the plugin.
print "Enter the name of the application menu that will be created for your plugin"
print "Clever Tools"
print "Menu name:",
menuName = raw_input()
print
# 6) Name of the menu entry (under the menu above) that invokes the plugin.
print "Enter the name of the menu entry (under the menu that you have just defined) that"
print "will be used to invoke your plugin. e.g. Clever Plugin"
print "Menu item name:",
menuItemName = raw_input()
# print a summary of what's about to happen
# and ask if we should proceed
print
print "Summary of plugin parameters:"
print "---------------------------------------------"
print "Plugin directory:         ", pluginDir
print "Name of the plugin:       ", pluginName
print "Description of the plugin:", pluginDescription
print "Category of the plugin:   ", pluginCategory
print "Menu name:                ", menuName
print "Menu item name:           ", menuItemName
print
print "Warning - Proceeding will make changes to CMakeLists.txt in this directory."
print "Create the plugin? [y/n]:",
createIt = raw_input()
# Anything other than 'y'/'Y' aborts before any filesystem change is made.
if createIt.lower() != 'y':
    print "Plugin creation cancelled, exiting"
    sys.exit(2)
# create the plugin and modify the build files
# create the new plugin directory
# NOTE(review): os.mkdir raises OSError if pluginDir already exists — the
# script will stop here rather than overwrite an existing plugin.
os.mkdir(pluginDir)
# copy files to appropriate names
# Each generic template file is copied into the new directory under a name
# derived from the lower-cased plugin name (e.g. plugin.cpp -> clevertool.cpp);
# the CMakeLists.txt keeps its name and README.whatnext becomes README.
shutil.copy(template_file('CMakeLists.txt'), pluginDir)
shutil.copy(template_file('README.whatnext'), os.path.join(pluginDir, 'README'))
shutil.copy(template_file('plugin.qrc'), os.path.join(pluginDir, pluginLCaseName + '.qrc'))
shutil.copy(template_file('plugin.png'), os.path.join(pluginDir, pluginLCaseName + '.png'))
shutil.copy(template_file('plugin.cpp'), os.path.join(pluginDir, pluginLCaseName + '.cpp'))
shutil.copy(template_file('plugin.h'), os.path.join(pluginDir, pluginLCaseName + '.h'))
shutil.copy(template_file('plugingui.cpp'), os.path.join(pluginDir, pluginLCaseName + 'gui.cpp'))
shutil.copy(template_file('plugingui.h'), os.path.join(pluginDir, pluginLCaseName + 'gui.h'))
shutil.copy(template_file('pluginguibase.ui'), os.path.join(pluginDir, pluginLCaseName + 'guibase.ui'))
# Substitute the plugin specific vars in the various files
# This is a brute force approach but its quick and dirty :)
#
# All of the copied files that contain [placeholder] tokens to expand.
files = [ plugin_file(pluginDir, 'CMakeLists.txt'),
          plugin_file(pluginDir, 'README'),
          plugin_file(pluginDir, pluginLCaseName + '.qrc'),
          plugin_file(pluginDir, pluginLCaseName + '.cpp'),
          plugin_file(pluginDir, pluginLCaseName + '.h'),
          plugin_file(pluginDir, pluginLCaseName + 'gui.cpp'),
          plugin_file(pluginDir, pluginLCaseName + 'gui.h'),
          plugin_file(pluginDir, pluginLCaseName + 'guibase.ui') ]
# replace occurrences of [pluginlcasename], [pluginname], [plugindescription],
# [plugincategory], [menuname], [menuitemname] in the template with the values
# supplied by the user.
# Plain string replacement is used instead of re.sub(): with re.sub() the
# user-supplied text is treated as a regex replacement template, so a
# backslash typed by the user (e.g. a Windows path in the description) would
# be interpreted as a regex escape and corrupt the output or raise an error.
replacements = [ ('[pluginlcasename]', pluginLCaseName),
                 ('[pluginname]', pluginName),
                 ('[plugindescription]', pluginDescription),
                 ('[plugincategory]', pluginCategory),
                 ('[menuname]', menuName),
                 ('[menuitemname]', menuItemName) ]
for file in files:
    # read contents of the file
    f = open(file)
    content = f.read()
    f.close()
    # replace every placeholder token with its literal value
    for token, value in replacements:
        content = content.replace(token, value)
    # write changes back to the file
    f = open(file, "w")
    f.write(content)
    f.close()
# Add an entry to src/plugins/CMakeLists.txt
# Append (mode 'a') a SUBDIRS entry so the new plugin is picked up by the
# build; this is the modification the confirmation prompt warned about.
f = open('CMakeLists.txt','a')
f.write('\nSUBDIRS ('+pluginDir+')\n')
f.close()
# Tell the user where the generated plugin lives and what to read next.
print "Your plugin %s has been created in %s, CMakeLists.txt has been modified." % (pluginName, pluginDir)
print
print "Once your plugin has successfully built, please see %s/README for" % (pluginDir)
print "hints on how to get started."
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.