repo_name stringlengths 5-100 | path stringlengths 4-375 | copies stringclasses 991 values | size stringlengths 4-7 | content stringlengths 666-1M | license stringclasses 15 values
---|---|---|---|---|---|
spacetelescope/stsci.tools | lib/stsci/tools/basicpar.py | 1 | 60106 | """basicpar.py -- General base class for parameter objects. Broken out
from PyRAF's IrafPar class.
$Id$
"""
import re
import sys
from . import irafutils, minmatch
from .irafglobals import INDEF, Verbose, yes, no
int_types = (int, )
# container class used for __deepcopy__ method
class _EmptyClass:
pass
# -----------------------------------------------------
# Warning (non-fatal) error. Raise an exception if in
# strict mode, or print a message if Verbose is on.
# -----------------------------------------------------
# Verbose (set in irafglobals.py) determines
# whether warning messages are printed when errors are found. The
# strict parameter to various methods and functions can be set to
# raise an exception on errors; otherwise we do our best to work
# around errors, only raising an exception for really serious,
# unrecoverable problems.
def warning(msg, strict=0, exception=SyntaxError, level=0):
if strict:
raise exception(msg)
elif Verbose>level:
sys.stdout.flush()
sys.stderr.write('Warning: %s' % msg)
if msg[-1:] != '\n': sys.stderr.write('\n')
# -----------------------------------------------------
# basic parameter factory
# -----------------------------------------------------
_string_types = [ 's', 'f', 'struct', 'z' ]
_real_types = [ 'r', 'd' ]
def parFactory(fields, strict=0):
"""parameter factory function
fields is a list of the comma-separated fields (as in the .par file).
Each entry is a string or None (indicating that the field was omitted).
Set the strict parameter to a non-zero value to do stricter parsing
(to find errors in the input)"""
if len(fields) < 3 or None in fields[0:3]:
raise SyntaxError("At least 3 fields must be given")
type = fields[1]
if type in _string_types:
return IrafParS(fields,strict)
elif type == 'R':
return StrictParR(fields,1)
elif type in _real_types:
return IrafParR(fields,strict)
elif type == "I":
return StrictParI(fields,1)
elif type == "i":
return IrafParI(fields,strict)
elif type == "b":
return IrafParB(fields,strict)
elif type == "ar":
return IrafParAR(fields,strict)
elif type == "ai":
return IrafParAI(fields,strict)
elif type == "as":
return IrafParAS(fields,strict)
elif type == "ab":
return IrafParAB(fields,strict)
elif type[:1] == "a":
raise SyntaxError("Cannot handle arrays of type %s" % type)
else:
raise SyntaxError("Cannot handle parameter type %s" % type)
# --------------------------------------------------------
# Publish the (simple) algorithm for combining scope+name
# --------------------------------------------------------
def makeFullName(parScope, parName):
""" Create the fully-qualified name (inclues scope if used) """
# Skip scope (and leading dot) if no scope, even in cases where scope
# IS used for other pars in the same task.
if parScope:
return parScope+'.'+parName
else:
return parName
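# Example: makeFullName("centerpars", "cbox") -> "centerpars.cbox";
# with no scope, makeFullName(None, "cbox") -> "cbox".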
# -----------------------------------------------------
# Set up minmatch dictionaries for parameter fields
# -----------------------------------------------------
flist = ("p_name", "p_xtype", "p_type", "p_mode", "p_prompt", "p_scope",
"p_value", "p_default", "p_filename", "p_maximum", "p_minimum")
_getFieldDict = minmatch.MinMatchDict()
for field in flist: _getFieldDict.add(field, field)
flist = ("p_prompt", "p_value", "p_filename", "p_maximum", "p_minimum", "p_mode", "p_scope")
_setFieldDict = minmatch.MinMatchDict()
for field in flist: _setFieldDict.add(field, field)
del flist, field
# utility function to check whether string is a parameter field
def isParField(s):
"""Returns true if string s appears to be a parameter field"""
try:
return (s[:2] == "p_") and s in _getFieldDict
except minmatch.AmbiguousKeyError:
# If ambiguous match, assume it is a parameter field.
# An exception will doubtless be raised later, but
# there's really no good choice here.
return 1
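# Example: isParField("p_min") minimum-matches "p_minimum" and returns
# true; isParField("value") returns false (no "p_" prefix).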
# basic IrafPar attributes
# IrafPar's are protected in setattr against adding arbitrary attributes,
# and this dictionary is used as a helper in instance initialization
_IrafPar_attr_dict = {
"name" : None,
"type" : None,
"mode" : None,
"value" : None,
"min" : None,
"max" : None,
"choice" : None,
"choiceDict" : None,
"prompt" : None,
"flags" : 0,
"scope" : None,
}
# flag bits tell whether value has been changed and
# whether it was set on the command line.
_changedFlag = 1
_cmdlineFlag = 2
# -----------------------------------------------------
# IRAF parameter base class
# -----------------------------------------------------
class IrafPar:
"""Non-array IRAF parameter base class"""
def __init__(self,fields,strict=0):
orig_len = len(fields)
if orig_len < 3 or None in fields[0:3]:
raise SyntaxError("At least 3 fields must be given")
#
# all the attributes that are going to get defined
#
self.__dict__.update(_IrafPar_attr_dict)
self.name = fields[0]
self.type = fields[1]
self.mode = fields[2]
self.scope = None # simple default; may be unused
#
# put fields into appropriate attributes
#
while len(fields) < 7: fields.append(None)
#
self.value = self._coerceValue(fields[3],strict)
if fields[4] is not None and '|' in fields[4]:
self._setChoice(fields[4].strip(),strict)
if fields[5] is not None:
if orig_len < 7:
warning("Max value illegal when choice list given" +
" for parameter " + self.name +
" (probably missing comma)",
strict)
# try to recover by assuming max string is prompt
fields[6] = fields[5]
fields[5] = None
else:
warning("Max value illegal when choice list given" +
" for parameter " + self.name, strict)
else:
#XXX should catch ValueError exceptions here and set to null
#XXX could also check for missing comma (null prompt, prompt
#XXX in max field)
if fields[4] is not None:
self.min = self._coerceValue(fields[4],strict)
if fields[5] is not None:
self.max = self._coerceValue(fields[5],strict)
if self.min not in [None, INDEF] and \
self.max not in [None, INDEF] and self.max < self.min:
warning("Max " + str(self.max) + " is less than minimum " + \
str(self.min) + " for parameter " + self.name,
strict)
self.min, self.max = self.max, self.min
if fields[6] is not None:
self.prompt = irafutils.removeEscapes(
irafutils.stripQuotes(fields[6]))
else:
self.prompt = ''
#
# check attributes to make sure they are appropriate for
# this parameter type (e.g. some do not allow choice list
# or min/max)
#
self._checkAttribs(strict)
#
# check parameter value to see if it is correct
#
try:
self.checkValue(self.value,strict)
except ValueError as e:
warning("Illegal initial value for parameter\n" + str(e),
strict, exception=ValueError)
# Set illegal values to None, just like IRAF
self.value = None
#--------------------------------------------
# public accessor methods
#--------------------------------------------
def isLegal(self):
"""Returns true if current parameter value is legal"""
try:
# apply a stricter definition of legal here
# fixable values have already been fixed
# don't accept None values
self.checkValue(self.value)
return self.value is not None
except ValueError:
return 0
def setScope(self,value=''):
"""Set scope value. Written this way to not change the
standard set of fields in the comma-separated list. """
# set through dictionary to avoid extra calls to __setattr__
self.__dict__['scope'] = value
def setCmdline(self,value=1):
"""Set cmdline flag"""
# set through dictionary to avoid extra calls to __setattr__
if value:
self.__dict__['flags'] = self.flags | _cmdlineFlag
else:
self.__dict__['flags'] = self.flags & ~_cmdlineFlag
def isCmdline(self):
"""Return cmdline flag"""
return (self.flags & _cmdlineFlag) == _cmdlineFlag
def setChanged(self,value=1):
"""Set changed flag"""
# set through dictionary to avoid another call to __setattr__
if value:
self.__dict__['flags'] = self.flags | _changedFlag
else:
self.__dict__['flags'] = self.flags & ~_changedFlag
def isChanged(self):
"""Return changed flag"""
return (self.flags & _changedFlag) == _changedFlag
def setFlags(self,value):
"""Set all flags"""
self.__dict__['flags'] = value
def isLearned(self, mode=None):
"""Return true if this parameter is learned
Hidden parameters are not learned; automatic parameters inherit
behavior from package/cl; other parameters are learned.
If mode is set, it determines how automatic parameters behave.
If not set, cl.mode parameter determines behavior.
"""
if "l" in self.mode: return 1
if "h" in self.mode: return 0
if "a" in self.mode:
if mode is None: mode = 'ql' # that is, iraf.cl.mode
if "h" in mode and "l" not in mode:
return 0
return 1
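# Example (hedged): learning behavior by mode string --
#     mode "l" -> 1 (learned); mode "h" -> 0 (hidden, not learned);
#     mode "a" defers to the cl mode, so isLearned(mode="h") -> 0.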
#--------------------------------------------
# other public methods
#--------------------------------------------
def getPrompt(self):
"""Alias for getWithPrompt() for backward compatibility"""
return self.getWithPrompt()
def getWithPrompt(self):
"""Interactively prompt for parameter value"""
if self.prompt:
pstring = self.prompt.split("\n")[0].strip()
else:
pstring = self.name
if self.choice:
schoice = list(map(self.toString, self.choice))
pstring = pstring + " (" + "|".join(schoice) + ")"
elif self.min not in [None, INDEF] or \
self.max not in [None, INDEF]:
pstring = pstring + " ("
if self.min not in [None, INDEF]:
pstring = pstring + self.toString(self.min)
pstring = pstring + ":"
if self.max not in [None, INDEF]:
pstring = pstring + self.toString(self.max)
pstring = pstring + ")"
# add current value as default
if self.value is not None:
pstring = pstring + " (" + self.toString(self.value,quoted=1) + ")"
pstring = pstring + ": "
# don't redirect stdin/out unless redirected filehandles are also ttys
# or unless originals are NOT ttys
stdout = sys.__stdout__
try:
if sys.stdout.isatty() or not stdout.isatty():
stdout = sys.stdout
except AttributeError:
pass
stdin = sys.__stdin__
try:
if sys.stdin.isatty() or not stdin.isatty():
stdin = sys.stdin
except AttributeError:
pass
# print prompt, suppressing both newline and following space
stdout.write(pstring)
stdout.flush()
ovalue = irafutils.tkreadline(stdin)
value = ovalue.strip()
# loop until we get an acceptable value
while (1):
try:
# null input usually means use current value as default
# check it anyway since it might not be acceptable
if value == "": value = self._nullPrompt()
self.set(value)
# None (no value) is not acceptable value after prompt
if self.value is not None: return
# if not EOF, keep looping
if ovalue == "":
stdout.flush()
raise EOFError("EOF on parameter prompt")
print("Error: specify a value for the parameter")
except ValueError as e:
print(str(e))
stdout.write(pstring)
stdout.flush()
ovalue = irafutils.tkreadline(stdin)
value = ovalue.strip()
def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
"""Return value of this parameter as a string (or in native format
if native is non-zero.)"""
if field and field != "p_value":
# note p_value comes back to this routine, so shortcut that case
return self._getField(field,native=native,prompt=prompt)
# may prompt for value if prompt flag is set
if prompt: self._optionalPrompt(mode)
if index is not None:
raise SyntaxError("Parameter "+self.name+" is not an array")
if native:
rv = self.value
else:
rv = self.toString(self.value)
return rv
def set(self, value, field=None, index=None, check=1):
"""Set value of this parameter from a string or other value.
Field is optional parameter field (p_prompt, p_minimum, etc.)
Index is optional array index (zero-based). Set check=0 to
assign the value without checking to see if it is within
the min-max range or in the choice list."""
if index is not None:
raise SyntaxError("Parameter "+self.name+" is not an array")
if field:
self._setField(value,field,check=check)
else:
if check:
self.value = self.checkValue(value)
else:
self.value = self._coerceValue(value)
self.setChanged()
def checkValue(self,value,strict=0):
"""Check and convert a parameter value.
Raises an exception if the value is not permitted for this
parameter. Otherwise returns the value (converted to the
right type.)
"""
v = self._coerceValue(value,strict)
return self.checkOneValue(v,strict)
def checkOneValue(self,v,strict=0):
"""Checks a single value to see if it is in range or choice list
Allows indirection strings starting with ")". Assumes
v has already been converted to right value by
_coerceOneValue. Returns value if OK, or raises
ValueError if not OK.
"""
if v in [None, INDEF] or (isinstance(v,str) and v[:1] == ")"):
return v
elif v == "":
# most parameters treat null string as omitted value
return None
elif self.choice is not None and v not in self.choiceDict:
schoice = list(map(self.toString, self.choice))
schoice = "|".join(schoice)
raise ValueError("Parameter %s: "
"value %s is not in choice list (%s)" %
(self.name, str(v), schoice))
elif (self.min not in [None, INDEF] and v<self.min):
raise ValueError("Parameter %s: "
"value `%s' is less than minimum `%s'" %
(self.name, str(v), str(self.min)))
elif (self.max not in [None, INDEF] and v>self.max):
raise ValueError("Parameter %s: "
"value `%s' is greater than maximum `%s'" %
(self.name, str(v), str(self.max)))
return v
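# Example (hedged): for an integer parameter with min=0 and max=10,
#     par.checkOneValue(5)   -> 5
#     par.checkOneValue(11)  -> raises ValueError (greater than maximum)
#     par.checkOneValue("")  -> None (null string means omitted value)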
def dpar(self, cl=1):
"""Return dpar-style executable assignment for parameter
Default is to write CL version of code; if cl parameter is
false, writes Python executable code instead.
"""
sval = self.toString(self.value, quoted=1)
if not cl:
if sval == "": sval = "None"
s = "%s = %s" % (self.name, sval)
return s
def fullName(self):
""" Return the fully-qualified name (inclues scope if used) """
return makeFullName(self.scope, self.name) # scope can be None or ''
def pretty(self,verbose=0):
"""Return pretty list description of parameter"""
# split prompt lines and add blanks in later lines to align them
plines = self.prompt.split('\n')
for i in range(len(plines)-1): plines[i+1] = 32*' ' + plines[i+1]
plines = '\n'.join(plines)
namelen = min(len(self.name), 12)
pvalue = self.get(prompt=0,lpar=1)
alwaysquoted = ['s', 'f', '*gcur', '*imcur', '*ukey', 'pset']
if self.type in alwaysquoted and self.value is not None: pvalue = '"' + pvalue + '"'
if self.mode == "h":
s = "%13s = %-15s %s" % ("("+self.name[:namelen],
pvalue+")", plines)
else:
s = "%13s = %-15s %s" % (self.name[:namelen],
pvalue, plines)
if not verbose: return s
if self.choice is not None:
s = s + "\n" + 32*" " + "|"
nline = 33
for i in range(len(self.choice)):
sch = str(self.choice[i]) + "|"
s = s + sch
nline = nline + len(sch) + 1
if nline > 80:
s = s + "\n" + 32*" " + "|"
nline = 33
elif self.min not in [None, INDEF] or self.max not in [None, INDEF]:
s = s + "\n" + 32*" "
if self.min not in [None, INDEF]:
s = s + str(self.min) + " <= "
s = s + self.name
if self.max not in [None, INDEF]:
s = s + " <= " + str(self.max)
return s
def save(self, dolist=0):
"""Return .par format string for this parameter
If dolist is set, returns fields as a list of strings. Default
is to return a single string appropriate for writing to a file.
"""
quoted = not dolist
fields = 7*[""]
fields[0] = self.name
fields[1] = self.type
fields[2] = self.mode
fields[3] = self.toString(self.value,quoted=quoted)
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
schoice.insert(0,'')
schoice.append('')
fields[4] = repr('|'.join(schoice))
elif self.min not in [None,INDEF]:
fields[4] = self.toString(self.min,quoted=quoted)
if self.max not in [None,INDEF]:
fields[5] = self.toString(self.max,quoted=quoted)
if self.prompt:
if quoted:
sprompt = repr(self.prompt)
else:
sprompt = self.prompt
# prompt can have embedded newlines (which are printed)
sprompt = sprompt.replace(r'\012', '\n')
sprompt = sprompt.replace(r'\n', '\n')
fields[6] = sprompt
# delete trailing null parameters
for i in [6,5,4]:
if fields[i] != "": break
del fields[i]
if dolist:
return fields
else:
return ','.join(fields)
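# Example (hedged): a real parameter like the one above might serialize as
# the single .par line
#     radius,r,h,5.0,0.0,10.0,'Aperture radius'
# or, with dolist=1, as the corresponding list of field strings.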
#--------------------------------------------
# special methods to give desired object syntax
#--------------------------------------------
# allow parameter object to be used in arithmetic expression
# (Python 2 only: the coerce builtin and the __coerce__ protocol
# were removed in Python 3)
def __coerce__(self, other):
return coerce(self.get(native=1), other)
# fields are accessible as attributes
def __getattr__(self,field):
if field[:1] == '_':
raise AttributeError(field)
try:
return self._getField(field, native=1)
except SyntaxError as e:
if field in _IrafPar_attr_dict:
# handle odd-ball case of new code accessing par's new
# attr (e.g. scope), with old-code-cached version of par
return _IrafPar_attr_dict[field] # return unused default
else:
raise AttributeError(str(e))
def __setattr__(self,attr,value):
# don't allow any new parameters to be added
if attr in self.__dict__:
self.__dict__[attr] = value
elif isParField(attr):
#XXX should check=0 be used here?
self._setField(value, attr)
else:
raise AttributeError("No attribute %s for parameter %s" %
(attr, self.name))
def __deepcopy__(self, memo):
"""Deep copy of this parameter object"""
new = _EmptyClass()
# shallow copy of dictionary suffices for most attributes
new.__dict__ = self.__dict__.copy()
# value, choice may be lists of atomic items
if isinstance(self.value, list):
new.value = list(self.value)
if isinstance(self.choice, list):
new.choice = list(self.choice)
# choiceDict is OK with shallow copy because it will
# always be reset if choices change
new.__class__ = self.__class__
return new
def __getstate__(self):
"""Return state info for pickle"""
# choiceDict gets reconstructed
if self.choice is None:
return self.__dict__
else:
d = self.__dict__.copy()
d['choiceDict'] = None
return d
def __setstate__(self, state):
"""Restore state info from pickle"""
self.__dict__ = state
if self.choice is not None:
self._setChoiceDict()
def __str__(self):
"""Return readable description of parameter"""
s = "<" + self.__class__.__name__ + " " + self.name + " " + self.type
s = s + " " + self.mode + " " + repr(self.value)
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
s = s + " |" + "|".join(schoice) + "|"
else:
s = s + " " + repr(self.min) + " " + repr(self.max)
s = s + ' "' + self.prompt + '">'
return s
#--------------------------------------------
# private methods -- may be used by subclasses, but should
# not be needed outside this module
#--------------------------------------------
def _checkAttribs(self,strict=0):
# by default no restrictions on attributes
pass
def _setChoice(self,s,strict=0):
"""Set choice parameter from string s"""
clist = _getChoice(s,strict)
self.choice = list(map(self._coerceValue, clist))
self._setChoiceDict()
def _setChoiceDict(self):
"""Create dictionary for choice list"""
# value is name of choice parameter (same as key)
self.choiceDict = {}
for c in self.choice: self.choiceDict[c] = c
def _nullPrompt(self):
"""Returns value to use when answer to prompt is null string"""
# most parameters just keep current default (even if None)
return self.value
def _optionalPrompt(self, mode):
"""Interactively prompt for parameter if necessary
Prompt for value if
(1) mode is hidden but value is undefined or bad, or
(2) mode is query and value was not set on command line
Never prompt for "u" mode parameters, which are local variables.
"""
if (self.mode == "h") or (self.mode == "a" and mode == "h"):
# hidden parameter
if not self.isLegal():
self.getWithPrompt()
elif self.mode == "u":
# "u" is a special mode used for local variables in CL scripts
# They should never prompt under any circumstances
if not self.isLegal():
raise ValueError(
"Attempt to access undefined local variable `%s'" %
self.name)
else:
# query parameter
if self.isCmdline()==0:
self.getWithPrompt()
def _getPFilename(self,native,prompt):
"""Get p_filename field for this parameter
Same as get for non-list params
"""
return self.get(native=native,prompt=prompt)
def _getPType(self):
"""Get underlying datatype for this parameter
Just self.type for normal params
"""
return self.type
def _getField(self, field, native=0, prompt=1):
"""Get a parameter field value"""
try:
# expand field name using minimum match
field = _getFieldDict[field]
except KeyError as e:
# re-raise the exception with a bit more info
raise SyntaxError("Cannot get field " + field +
" for parameter " + self.name + "\n" + str(e))
if field == "p_value":
# return value of parameter
# Note that IRAF returns the filename for list parameters
# when p_value is used. I consider this a bug, and it does
# not appear to be used by any cl scripts or SPP programs
# in either IRAF or STSDAS. It is also in conflict with
# the IRAF help documentation. I am making p_value exactly
# the same as just a simple CL parameter reference.
return self.get(native=native,prompt=prompt)
elif field == "p_name": return self.name
elif field == "p_xtype": return self.type
elif field == "p_type": return self._getPType()
elif field == "p_mode": return self.mode
elif field == "p_prompt": return self.prompt
elif field == "p_scope": return self.scope
elif field == "p_default" or field == "p_filename":
# these all appear to be equivalent -- they just return the
# current PFilename of the parameter (which is the same as the value
# for non-list parameters, and is the filename for list parameters)
return self._getPFilename(native,prompt)
elif field == "p_maximum":
if native:
return self.max
else:
return self.toString(self.max)
elif field == "p_minimum":
if self.choice is not None:
if native:
return self.choice
else:
schoice = list(map(self.toString, self.choice))
return "|" + "|".join(schoice) + "|"
else:
if native:
return self.min
else:
return self.toString(self.min)
else:
# XXX unimplemented fields:
# p_length: maximum string length in bytes -- what to do with it?
raise RuntimeError("Program bug in IrafPar._getField()\n" +
"Requested field " + field + " for parameter " + self.name)
def _setField(self, value, field, check=1):
"""Set a parameter field value"""
try:
# expand field name using minimum match
field = _setFieldDict[field]
except KeyError as e:
raise SyntaxError("Cannot set field " + field +
" for parameter " + self.name + "\n" + str(e))
if field == "p_prompt":
self.prompt = irafutils.removeEscapes(irafutils.stripQuotes(value))
elif field == "p_value":
self.set(value,check=check)
elif field == "p_filename":
# this is only relevant for list parameters (*imcur, *gcur, etc.)
self.set(value,check=check)
elif field == "p_scope":
self.scope = value
elif field == "p_maximum":
self.max = self._coerceOneValue(value)
elif field == "p_minimum":
if isinstance(value,str) and '|' in value:
self._setChoice(irafutils.stripQuotes(value))
else:
self.min = self._coerceOneValue(value)
elif field == "p_mode":
# not doing any type or value checking here -- setting mode is
# rare, so assume that it is being done correctly
self.mode = irafutils.stripQuotes(value)
else:
raise RuntimeError("Program bug in IrafPar._setField()" +
"Requested field " + field + " for parameter " + self.name)
def _coerceValue(self,value,strict=0):
"""Coerce parameter to appropriate type
Should accept None or null string.
"""
return self._coerceOneValue(value,strict)
def _coerceOneValue(self,value,strict=0):
"""Coerce a scalar parameter to the appropriate type
Default implementation simply prevents direct use of base class.
Should accept None or null string.
"""
raise NotImplementedError("class IrafPar cannot be used directly")
# -----------------------------------------------------
# IRAF array parameter base class
# -----------------------------------------------------
class IrafArrayPar(IrafPar):
"""IRAF array parameter class"""
def __init__(self,fields,strict=0):
orig_len = len(fields)
if orig_len < 3:
raise SyntaxError("At least 3 fields must be given")
#
# all the attributes that are going to get defined
#
self.__dict__.update(_IrafPar_attr_dict)
self.name = fields[0]
self.type = fields[1]
self.mode = fields[2]
self.__dict__['shape'] = None
#
# for array parameters, dimensions follow mode field
# and values come from fields after prompt
#
if len(fields)<4 or fields[3] is None:
raise ValueError("Missing dimension field for array parameter")
ndim = int(fields[3])
if len(fields) < 4+2*ndim:
raise ValueError("Missing array shape fields for array parameter")
shape = []
array_size = 1
for i in range(ndim):
shape.append(int(fields[4+2*i]))
array_size = array_size*shape[-1]
self.shape = tuple(shape)
nvstart = 7+2*ndim
fields.extend([""]*(nvstart-len(fields)))
fields.extend([None]*(nvstart+array_size-len(fields)))
if len(fields) > nvstart+array_size:
raise SyntaxError("Too many values for array" +
" for parameter " + self.name)
#
self.value = [None]*array_size
self.value = self._coerceValue(fields[nvstart:],strict)
if fields[nvstart-3] is not None and '|' in fields[nvstart-3]:
self._setChoice(fields[nvstart-3].strip(),strict)
if fields[nvstart-2] is not None:
if orig_len < nvstart:
warning("Max value illegal when choice list given" +
" for parameter " + self.name +
" (probably missing comma)",
strict)
# try to recover by assuming max string is prompt
#XXX risky -- all init values might be off by one
fields[nvstart-1] = fields[nvstart-2]
fields[nvstart-2] = None
else:
warning("Max value illegal when choice list given" +
" for parameter " + self.name, strict)
else:
self.min = self._coerceOneValue(fields[nvstart-3],strict)
self.max = self._coerceOneValue(fields[nvstart-2],strict)
if fields[nvstart-1] is not None:
self.prompt = irafutils.removeEscapes(
irafutils.stripQuotes(fields[nvstart-1]))
else:
self.prompt = ''
if self.min not in [None, INDEF] and \
self.max not in [None, INDEF] and self.max < self.min:
warning("Maximum " + str(self.max) + " is less than minimum " + \
str(self.min) + " for parameter " + self.name,
strict)
self.min, self.max = self.max, self.min
#
# check attributes to make sure they are appropriate for
# this parameter type (e.g. some do not allow choice list
# or min/max)
#
self._checkAttribs(strict)
#
# check parameter value to see if it is correct
#
try:
self.checkValue(self.value,strict)
except ValueError as e:
warning("Illegal initial value for parameter\n" + str(e),
strict, exception=ValueError)
# Set illegal values to None, just like IRAF
self.value = None
#--------------------------------------------
# public methods
#--------------------------------------------
def save(self, dolist=0):
"""Return .par format string for this parameter
If dolist is set, returns fields as a list of strings. Default
is to return a single string appropriate for writing to a file.
"""
quoted = not dolist
array_size = 1
for d in self.shape:
array_size = d*array_size
ndim = len(self.shape)
fields = (7+2*ndim+len(self.value))*[""]
fields[0] = self.name
fields[1] = self.type
fields[2] = self.mode
fields[3] = str(ndim)
next = 4
for d in self.shape:
fields[next] = str(d); next += 1
fields[next] = '1'; next += 1
nvstart = 7+2*ndim
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
schoice.insert(0,'')
schoice.append('')
fields[nvstart-3] = repr('|'.join(schoice))
elif self.min not in [None,INDEF]:
fields[nvstart-3] = self.toString(self.min,quoted=quoted)
# insert an escaped line break before min field
if quoted:
fields[nvstart-3] = '\\\n' + fields[nvstart-3]
if self.max not in [None,INDEF]:
fields[nvstart-2] = self.toString(self.max,quoted=quoted)
if self.prompt:
if quoted:
sprompt = repr(self.prompt)
else:
sprompt = self.prompt
# prompt can have embedded newlines (which are printed)
sprompt = sprompt.replace(r'\012', '\n')
sprompt = sprompt.replace(r'\n', '\n')
fields[nvstart-1] = sprompt
for i in range(len(self.value)):
fields[nvstart+i] = self.toString(self.value[i],quoted=quoted)
# insert an escaped line break before value fields
if dolist:
return fields
else:
fields[nvstart] = '\\\n' + fields[nvstart]
return ','.join(fields)
def dpar(self, cl=1):
"""Return dpar-style executable assignment for parameter
Default is to write CL version of code; if cl parameter is
false, writes Python executable code instead. Note that
dpar doesn't even work for arrays in the CL, so we just use
Python syntax here.
"""
sval = list(map(self.toString, self.value, len(self.value)*[1]))
for i in range(len(sval)):
if sval[i] == "":
sval[i] = "None"
s = "%s = [%s]" % (self.name, ', '.join(sval))
return s
def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
"""Return value of this parameter as a string (or in native format
if native is non-zero.)"""
if field: return self._getField(field,native=native,prompt=prompt)
# may prompt for value if prompt flag is set
#XXX should change _optionalPrompt so we prompt for each element of
#XXX the array separately? I think array parameters are
#XXX not useful as non-hidden params.
if prompt: self._optionalPrompt(mode)
if index is not None:
sumindex = self._sumindex(index)
try:
if native:
return self.value[sumindex]
else:
return self.toString(self.value[sumindex])
except IndexError:
# should never happen
raise SyntaxError("Illegal index [" + repr(sumindex) +
"] for array parameter " + self.name)
elif native:
# return object itself for an array because it is
# indexable, can have values assigned, etc.
return self
else:
# return blank-separated string of values for array
return str(self)
def set(self, value, field=None, index=None, check=1):
"""Set value of this parameter from a string or other value.
Field is optional parameter field (p_prompt, p_minimum, etc.)
Index is optional array index (zero-based). Set check=0 to
assign the value without checking to see if it is within
the min-max range or in the choice list."""
if index is not None:
sumindex = self._sumindex(index)
try:
value = self._coerceOneValue(value)
if check:
self.value[sumindex] = self.checkOneValue(value)
else:
self.value[sumindex] = value
return
except IndexError:
# should never happen
raise SyntaxError("Illegal index [" + repr(sumindex) +
"] for array parameter " + self.name)
if field:
self._setField(value,field,check=check)
else:
if check:
self.value = self.checkValue(value)
else:
self.value = self._coerceValue(value)
self.setChanged()
def checkValue(self,value,strict=0):
"""Check and convert a parameter value.
Raises an exception if the value is not permitted for this
parameter. Otherwise returns the value (converted to the
right type.)
"""
v = self._coerceValue(value,strict)
for i in range(len(v)):
self.checkOneValue(v[i],strict=strict)
return v
#--------------------------------------------
# special methods
#--------------------------------------------
# array parameters can be subscripted
# note subscripts start at zero, unlike CL subscripts
# that start at one
def __getitem__(self, index):
return self.get(index=index,native=1)
def __setitem__(self, index, value):
self.set(value, index=index)
def __str__(self):
"""Return readable description of parameter"""
# This differs from non-arrays in that it returns a
# print string with just the values. That's because
# the object itself is returned as the native value.
sv = list(map(str, self.value))
for i in range(len(sv)):
if self.value[i] is None:
sv[i] = "INDEF"
return ' '.join(sv)
def __len__(self):
return len(self.value)
#--------------------------------------------
# private methods
#--------------------------------------------
def _sumindex(self, index=None):
"""Convert tuple index to 1-D index into value"""
try:
ndim = len(index)
except TypeError:
# turn index into a 1-tuple
index = (index,)
ndim = 1
if len(self.shape) != ndim:
raise ValueError("Index to %d-dimensional array %s has too %s dimensions" %
(len(self.shape), self.name, ["many","few"][len(self.shape) > ndim]))
sumindex = 0
for i in range(ndim-1,-1,-1):
index1 = index[i]
if index1 < 0 or index1 >= self.shape[i]:
raise ValueError("Dimension %d index for array %s is out of bounds (value=%d)" %
(i+1, self.name, index1))
sumindex = index1 + sumindex*self.shape[i]
return sumindex
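# Example: for shape (3, 4), index (1, 2) maps to 1 + 2*3 = 7 -- the
# first subscript varies fastest (column-major order, as in IRAF/Fortran).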
def _getPType(self):
"""Get underlying datatype for this parameter (strip off 'a' array params)"""
return self.type[1:]
def _coerceValue(self,value,strict=0):
"""Coerce parameter to appropriate type
Should accept None or null string. Must be an array.
"""
try:
if isinstance(value,str):
# allow single blank-separated string as input
value = value.split()
if len(value) != len(self.value):
raise IndexError
v = len(self.value)*[0]
for i in range(len(v)):
v[i] = self._coerceOneValue(value[i],strict)
return v
except (IndexError, TypeError):
raise ValueError("Value must be a " + repr(len(self.value)) +
"-element array for " + self.name)
def isLegal(self):
"""Dont call checkValue for arrays"""
try:
return self.value is not None
except ValueError:
return 0
# -----------------------------------------------------
# IRAF string parameter mixin class
# -----------------------------------------------------
class _StringMixin:
"""IRAF string parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
"""Convert a single (non-array) value of the appropriate type for
this parameter to a string"""
if value is None:
return ""
elif quoted:
return repr(value)
else:
return value
# slightly modified checkOneValue allows minimum match for
# choice strings and permits null string as value
def checkOneValue(self,v,strict=0):
if v is None or v[:1] == ")":
return v
elif self.choice is not None:
try:
v = self.choiceDict[v]
except minmatch.AmbiguousKeyError:
clist = self.choiceDict.getall(v)
raise ValueError("Parameter %s: "
"ambiguous value `%s', could be %s" %
(self.name, str(v), "|".join(clist)))
except KeyError:
raise ValueError("Parameter %s: "
"value `%s' is not in choice list (%s)" %
(self.name, str(v), "|".join(self.choice)))
elif (self.min is not None and v<self.min):
raise ValueError("Parameter %s: "
"value `%s' is less than minimum `%s'" %
(self.name, str(v), str(self.min)))
elif (self.max is not None and v>self.max):
raise ValueError("Parameter %s: "
"value `%s' is greater than maximum `%s'" %
(self.name, str(v), str(self.max)))
return v
#--------------------------------------------
# private methods
#--------------------------------------------
def _checkAttribs(self, strict):
"""Check initial attributes to make sure they are legal"""
if self.min:
warning("Minimum value not allowed for string-type parameter " +
self.name, strict)
self.min = None
if self.max:
if not self.prompt:
warning("Maximum value not allowed for string-type parameter " +
self.name + " (probably missing comma)",
strict)
# try to recover by assuming max string is prompt
self.prompt = self.max
else:
warning("Maximum value not allowed for string-type parameter " +
self.name, strict)
self.max = None
# If not in strict mode, allow file (f) to act just like string (s).
# Otherwise choice is also forbidden for file type
if strict and self.type == "f" and self.choice:
warning("Illegal choice value for type '" +
self.type + "' for parameter " + self.name,
strict)
self.choice = None
def _setChoiceDict(self):
"""Create min-match dictionary for choice list"""
# value is full name of choice parameter
self.choiceDict = minmatch.MinMatchDict()
for c in self.choice: self.choiceDict.add(c, c)
def _nullPrompt(self):
"""Returns value to use when answer to prompt is null string"""
# for string, null string is a legal value
# keep current default unless it is None
if self.value is None:
return ""
else:
return self.value
def _coerceOneValue(self,value,strict=0):
if value is None:
return value
elif isinstance(value,str):
# strip double quotes and remove escapes before quotes
return irafutils.removeEscapes(irafutils.stripQuotes(value))
else:
return str(value)
# -----------------------------------------------------
# IRAF string parameter class
# -----------------------------------------------------
class IrafParS(_StringMixin, IrafPar):
"""IRAF string parameter class"""
pass
# -----------------------------------------------------
# IRAF string array parameter class
# -----------------------------------------------------
class IrafParAS(_StringMixin,IrafArrayPar):
"""IRAF string array parameter class"""
pass
# -----------------------------------------------------
# IRAF boolean parameter mixin class
# -----------------------------------------------------
class _BooleanMixin:
"""IRAF boolean parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
if value in [None, INDEF]:
return ""
elif isinstance(value,str):
# presumably an indirection value ')task.name'
if quoted:
return repr(value)
else:
return value
else:
# must be internal yes, no value
return str(value)
#--------------------------------------------
# private methods
#--------------------------------------------
def _checkAttribs(self, strict):
"""Check initial attributes to make sure they are legal"""
if self.min:
warning("Minimum value not allowed for boolean-type parameter " +
self.name, strict)
self.min = None
if self.max:
if not self.prompt:
warning("Maximum value not allowed for boolean-type parameter " +
self.name + " (probably missing comma)",
strict)
# try to recover by assuming max string is prompt
self.prompt = self.max
else:
warning("Maximum value not allowed for boolean-type parameter " +
self.name, strict)
self.max = None
if self.choice:
warning("Choice values not allowed for boolean-type parameter " +
self.name, strict)
self.choice = None
# accepts special yes, no objects, integer values 0,1 or
# string 'yes','no' and variants
# internal value is yes, no, None/INDEF, or indirection string
def _coerceOneValue(self,value,strict=0):
if value == INDEF:
return INDEF
elif value is None or value == "":
return None
elif value in (1, 1.0, yes, "yes", "YES", "y", "Y", True):
return yes
elif value in (0, 0.0, no, "no", "NO", "n", "N", False):
return no
elif isinstance(value,str):
v2 = irafutils.stripQuotes(value.strip())
if v2 == "" or v2 == "INDEF" or \
((not strict) and (v2.upper() == "INDEF")):
return INDEF
elif v2[0:1] == ")":
# assume this is indirection -- just save it as a string
return v2
raise ValueError("Parameter %s: illegal boolean value %s or type %s" %
(self.name, repr(value), str(type(value))))
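# Example (hedged): accepted spellings and their coerced values
#     _coerceOneValue("Y")       -> yes
#     _coerceOneValue(0.0)       -> no
#     _coerceOneValue("indef")   -> INDEF (non-strict mode only)
#     _coerceOneValue(")b.par")  -> ")b.par" (kept as indirection string)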
# -----------------------------------------------------
# IRAF boolean parameter class
# -----------------------------------------------------
class IrafParB(_BooleanMixin,IrafPar):
"""IRAF boolean parameter class"""
pass
# -----------------------------------------------------
# IRAF boolean array parameter class
# -----------------------------------------------------
class IrafParAB(_BooleanMixin,IrafArrayPar):
"""IRAF boolean array parameter class"""
pass
# -----------------------------------------------------
# IRAF integer parameter mixin class
# -----------------------------------------------------
class _IntMixin:
"""IRAF integer parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
if value is None:
return ""
else:
return str(value)
#--------------------------------------------
# private methods
#--------------------------------------------
# coerce value to integer
def _coerceOneValue(self,value,strict=0):
if value == INDEF:
return INDEF
elif value is None or isinstance(value,int):
return value
elif value in ("", "None", "NONE"):
return None
elif isinstance(value,float):
# try converting to integer
try:
return int(value)
except (ValueError, OverflowError):
pass
elif isinstance(value,str):
s2 = irafutils.stripQuotes(value.strip())
if s2 == "INDEF" or \
((not strict) and (s2.upper() == "INDEF")):
return INDEF
elif s2[0:1] == ")":
# assume this is indirection -- just save it as a string
return s2
elif s2[-1:] == "x":
# hexadecimal
return int(s2[:-1],16)
elif "." in s2:
# try interpreting as a float and converting to integer
try:
return int(float(s2))
except (ValueError, OverflowError):
pass
else:
try:
return int(s2)
except ValueError:
pass
else:
# maybe it has an int method
try:
return int(value)
except ValueError:
pass
raise ValueError("Parameter %s: illegal integer value %s" %
(self.name, repr(value)))
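# Example (hedged): integer coercion accepts several spellings
#     _coerceOneValue("12x")   -> 18 (hexadecimal)
#     _coerceOneValue("3.7")   -> 3 (float string truncated toward zero)
#     _coerceOneValue("indef") -> INDEF (non-strict mode only)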
# -----------------------------------------------------
# IRAF integer parameter class
# -----------------------------------------------------
class IrafParI(_IntMixin,IrafPar):
"""IRAF integer parameter class"""
pass
# -----------------------------------------------------
# IRAF integer array parameter class
# -----------------------------------------------------
class IrafParAI(_IntMixin,IrafArrayPar):
"""IRAF integer array parameter class"""
pass
# -----------------------------------------------------
# Strict integer parameter mixin class
# -----------------------------------------------------
class _StrictIntMixin(_IntMixin):
"""Strict integer parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
return str(value)
#--------------------------------------------
# private methods
#--------------------------------------------
# coerce value to integer
def _coerceOneValue(self,value,strict=0):
if value is None or isinstance(value,int):
return value
elif isinstance(value,str):
s2 = irafutils.stripQuotes(value.strip())
if s2[-1:] == "x":
# hexadecimal
return int(s2[:-1],16)
elif s2 == '':
raise ValueError('Parameter '+self.name+ \
': illegal empty integer value')
else:
# see if it is a stringified int
try:
return int(s2)
except ValueError:
pass
# otherwise it is not a strict integer
raise ValueError("Parameter %s: illegal integer value %s" %
(self.name, repr(value)))
# -----------------------------------------------------
# Strict integer parameter class
# -----------------------------------------------------
class StrictParI(_StrictIntMixin,IrafPar):
"""Strict integer parameter class"""
pass
# -----------------------------------------------------
# IRAF real parameter mixin class
# -----------------------------------------------------
_re_d = re.compile(r'[Dd]')
_re_colon = re.compile(r':')
class _RealMixin:
"""IRAF real parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
if value is None:
return ""
else:
return str(value)
#--------------------------------------------
# private methods
#--------------------------------------------
def _checkAttribs(self, strict):
"""Check initial attributes to make sure they are legal"""
if self.choice:
warning("Choice values not allowed for real-type parameter " +
self.name, strict)
self.choice = None
# coerce value to real
def _coerceOneValue(self,value,strict=0):
if value == INDEF:
return INDEF
elif value is None or isinstance(value,float):
return value
elif value in ("", "None", "NONE"):
return None
elif isinstance(value, int_types):
return float(value)
elif isinstance(value,str):
s2 = irafutils.stripQuotes(value.strip())
if s2 == "INDEF" or \
((not strict) and (s2.upper() == "INDEF")):
return INDEF
elif s2[0:1] == ")":
# assume this is indirection -- just save it as a string
return s2
# allow +dd:mm:ss.s sexagesimal format for floats
fvalue = 0.0
vscale = 1.0
vsign = 1
i1 = 0
mm = _re_colon.search(s2)
if mm is not None:
if s2[0:1] == "-":
i1 = 1
vsign = -1
elif s2[0:1] == "+":
i1 = 1
while mm is not None:
i2 = mm.start()
fvalue = fvalue + int(s2[i1:i2])/vscale
i1 = i2+1
vscale = vscale*60.0
mm = _re_colon.search(s2,i1)
# special handling for d exponential notation
mm = _re_d.search(s2,i1)
try:
if mm is None:
return vsign*(fvalue + float(s2[i1:])/vscale)
else:
return vsign*(fvalue + \
float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale)
except ValueError:
pass
else:
# maybe it has a float method
try:
return float(value)
except ValueError:
pass
raise ValueError("Parameter %s: illegal float value %s" %
(self.name, repr(value)))
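# Example (hedged): real coercion handles sexagesimal and 'd' exponents
#     _coerceOneValue("-12:30:00") -> -12.5 (+dd:mm:ss.s format)
#     _coerceOneValue("1.5d3")     -> 1500.0 ('d' treated as 'E')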
# -----------------------------------------------------
# IRAF real parameter class
# -----------------------------------------------------
class IrafParR(_RealMixin,IrafPar):
"""IRAF real parameter class"""
pass
# -----------------------------------------------------
# IRAF real array parameter class
# -----------------------------------------------------
class IrafParAR(_RealMixin,IrafArrayPar):
"""IRAF real array parameter class"""
pass
# -----------------------------------------------------
# Strict real parameter mixin class
# -----------------------------------------------------
class _StrictRealMixin(_RealMixin):
"""Strict real parameter mixin class"""
#--------------------------------------------
# public methods
#--------------------------------------------
def toString(self, value, quoted=0):
return str(value)
#--------------------------------------------
# private methods
#--------------------------------------------
# coerce value to real
def _coerceOneValue(self,value,strict=0):
if value is None or isinstance(value,float):
return value
elif isinstance(value, int_types):
return float(value)
elif isinstance(value,str):
s2 = irafutils.stripQuotes(value.strip())
if s2 == '':
raise ValueError('Parameter '+self.name+ \
': illegal empty float value')
# allow +dd:mm:ss.s sexagesimal format for floats
fvalue = 0.0
vscale = 1.0
vsign = 1
i1 = 0
mm = _re_colon.search(s2)
if mm is not None:
if s2[0:1] == "-":
i1 = 1
vsign = -1
elif s2[0:1] == "+":
i1 = 1
while mm is not None:
i2 = mm.start()
fvalue = fvalue + int(s2[i1:i2])/vscale
i1 = i2+1
vscale = vscale*60.0
mm = _re_colon.search(s2,i1)
# special handling for d exponential notation
mm = _re_d.search(s2,i1)
try:
if mm is None:
return vsign*(fvalue + float(s2[i1:])/vscale)
else:
return vsign*(fvalue + \
float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale)
except ValueError:
pass
# see if it's a stringified float
try:
return float(s2)
except ValueError:
raise ValueError("Parameter %s: illegal float value %s" %
(self.name, repr(value)))
# Otherwise it is not a strict float
raise ValueError("Parameter %s: illegal float value %s" %
(self.name, repr(value)))
# -----------------------------------------------------
# Strict real parameter class
# -----------------------------------------------------
class StrictParR(_StrictRealMixin,IrafPar):
"""Strict real parameter class"""
pass
# -----------------------------------------------------
# Utility routine for parsing choice string
# -----------------------------------------------------
_re_choice = re.compile(r'\|')
def _getChoice(s, strict):
clist = s.split("|")
# string is allowed to start and end with "|", so ignore initial
# and final empty strings
if not clist[0]: del clist[0]
if len(clist)>1 and not clist[-1]: del clist[-1]
return clist
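# Example: _getChoice("|left|right|center|", 0) -> ['left', 'right', 'center']
# (the leading and trailing "|" delimiters are ignored).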
| bsd-3-clause |
ALSchwalm/python-prompt-toolkit | prompt_toolkit/contrib/completers/filesystem.py | 3 | 3312 | from __future__ import unicode_literals
from prompt_toolkit.completion import Completer, Completion
import os
__all__ = (
'PathCompleter',
)
class PathCompleter(Completer):
"""
Complete for Path variables.
:param get_paths: Callable which returns a list of directories to look into
when the user enters a relative path.
:param file_filter: Callable which takes a filename and returns whether
this file should show up in the completion. ``None``
when no filtering has to be done.
:param min_input_len: Don't do autocompletion when the input string is shorter.
"""
def __init__(self, only_directories=False, get_paths=None, file_filter=None,
min_input_len=0, expanduser=False):
assert get_paths is None or callable(get_paths)
assert file_filter is None or callable(file_filter)
assert isinstance(min_input_len, int)
assert isinstance(expanduser, bool)
self.only_directories = only_directories
self.get_paths = get_paths or (lambda: ['.'])
self.file_filter = file_filter or (lambda _: True)
self.min_input_len = min_input_len
self.expanduser = expanduser
def get_completions(self, document, complete_event):
text = document.text_before_cursor
# Complete only when we have at least the minimal input length,
# otherwise, we can get too many results and autocompletion will
# become too heavy.
if len(text) < self.min_input_len:
return
try:
# Directories where to look.
dirname = os.path.dirname(text)
if dirname:
directories = [os.path.dirname(os.path.join(p, text))
for p in self.get_paths()]
else:
directories = self.get_paths()
# Start of current file.
prefix = os.path.basename(text)
# Get all filenames.
filenames = []
for directory in directories:
# Do tilde expansion.
if self.expanduser:
directory = os.path.expanduser(directory)
# Look for matches in this directory.
if os.path.isdir(directory):
for filename in os.listdir(directory):
if filename.startswith(prefix):
filenames.append((directory, filename))
# Sort
filenames = sorted(filenames, key=lambda k: k[1])
# Yield them.
for directory, filename in filenames:
completion = filename[len(prefix):]
full_name = os.path.join(directory, filename)
if os.path.isdir(full_name):
# For directories, add a slash to the filename.
# (We don't add them to the `completion`. Users can type it
# to trigger the autocompletion themselves.)
filename += '/'
else:
if self.only_directories or not self.file_filter(full_name):
continue
yield Completion(completion, 0, display=filename)
except OSError:
pass
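# Example (hedged sketch): exercising the completer directly. This assumes
# this prompt_toolkit version exposes Document(text) with the cursor
# defaulting to the end of the text; the names below are illustrative.
#
#     from prompt_toolkit.document import Document
#     completer = PathCompleter(only_directories=True, expanduser=True)
#     for c in completer.get_completions(Document('/usr/lo'), None):
#         print(c.display)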
| bsd-3-clause |
indictranstech/buyback-erp | erpnext/hr/doctype/hr_settings/hr_settings.py | 38 | 1282 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.model.document import Document
class HRSettings(Document):
def validate(self):
self.update_birthday_reminders()
from erpnext.setup.doctype.naming_series.naming_series import set_by_naming_series
set_by_naming_series("Employee", "employee_number",
self.get("emp_created_by")=="Naming Series", hide_name_field=True)
def update_birthday_reminders(self):
original_stop_birthday_reminders = cint(frappe.db.get_value("HR Settings",
None, "stop_birthday_reminders"))
# reset birthday reminders
if cint(self.stop_birthday_reminders) != original_stop_birthday_reminders:
frappe.db.sql("""delete from `tabEvent` where repeat_on='Every Year' and ref_type='Employee'""")
if not self.stop_birthday_reminders:
for employee in frappe.db.sql_list("""select name from `tabEmployee` where status='Active' and
ifnull(date_of_birth, '')!=''"""):
frappe.get_doc("Employee", employee).update_dob_event()
frappe.msgprint(frappe._("Updated Birthday Reminders")) | agpl-3.0 |
jamesrobertlloyd/kmc-research | src/deep_learning/rbm_label.py | 1 | 23562 | """This tutorial introduces restricted Boltzmann machines (RBMs) using Theano.
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
Modified by James Robert Lloyd, December 2013
"""
import cPickle
import gzip
import time
import PIL.Image
import numpy
import numpy as np
import theano
import theano.tensor as T
import os
from theano.tensor.shared_randomstreams import RandomStreams
from utils import tile_raster_images
from logistic_sgd import load_data
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
def __init__(self, input=None, n_visible=784, n_hidden=500, \
W=None, hbias=None, vbias=None, numpy_rng=None,
theano_rng=None):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
:param vbias: None for standalone RBMs or a symbolic variable
pointing to a shared visible units bias
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
# W is initialized with `initial_W` which is uniformly
# sampled from -4*sqrt(6./(n_visible+n_hidden)) and
# 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
# converted using asarray to dtype theano.config.floatX so
# that the code is runnable on GPU
initial_W = numpy.asarray(numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)),
dtype=theano.config.floatX)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(value=numpy.zeros(n_hidden,
dtype=theano.config.floatX),
name='hbias', borrow=True)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(value=numpy.zeros(n_visible,
dtype=theano.config.floatX),
name='vbias', borrow=True)
# initialize input layer for standalone RBM or layer0 of DBN
self.input = input
if not input:
self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Function to compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
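# The quantity returned above is the standard RBM free energy,
#     F(v) = -vbias.v - sum_j log(1 + exp(hbias_j + (v.W)_j)),
# matching vbias_term, wx_b and hidden_term in the code.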
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param persistent: None for CD. For PCD, shared variable
containing old state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param k: number of Gibbs steps to do in CD-k/PCD-k
Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
# compute positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
# for CD, we use the newly generated hidden sample
# for PCD, we initialize from the old state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
[pre_sigmoid_nvs, nv_means, nv_samples,
pre_sigmoid_nhs, nh_means, nh_samples], updates = \
theano.scan(self.gibbs_hvh,
# the Nones are placeholders indicating that only the
# 6th output (the hidden sample) is recurrent;
# chain_start is its initial state
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k)
# determine gradients on RBM parameters
# note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
cost = T.mean(self.free_energy(self.input)) - T.mean(
self.free_energy(chain_end))
# We must not compute the gradient through the gibbs sampling
gparams = T.grad(cost, self.params, consider_constant=[chain_end])
# constructs the update dictionary
for gparam, param in zip(gparams, self.params):
# make sure that the learning rate is of the right dtype
updates[param] = param - gparam * T.cast(lr,
dtype=theano.config.floatX)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_samples[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(updates,
pre_sigmoid_nvs[-1])
return monitoring_cost, updates
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = T.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
fe_xi)))
# increment bit_i_idx (modulo n_visible) as part of the updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
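# Sketch of why this works: log PL(x) = sum_i log P(x_i | x_{\i}) and
# P(x_i | x_{\i}) = sigmoid(FE(xi_flip) - FE(xi)), so evaluating a single
# rotating bit index and scaling by n_visible gives a cheap stochastic
# estimate of the full sum.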
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
optimized for speed and stability. This is done by changing
several parts of the subgraphs with others. One such
optimization expresses terms of the form log(sigmoid(x)) in
terms of softplus. We need this optimization for the
cross-entropy since sigmoid of numbers larger than 30. (or
even less than that) turns to 1. and numbers smaller than
-30. turn to 0, which in turn will force Theano to compute
log(0) and therefore we will get either -inf or NaN as
cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
of log(sigmoid(..)) and will not apply the wanted
optimization. We cannot simply replace the sigmoid inside scan
with something else either, because this only needs to be done
on the last step. Therefore the easiest and most efficient way
is to get also the pre-sigmoid activation as an output of
scan, and apply both the log and sigmoid outside scan such
that Theano can catch and optimize the expression.
"""
cross_entropy = T.mean(
T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
(1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
axis=1))
return cross_entropy
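# Hedged numeric illustration of the stability issue described above:
# for a pre-sigmoid value x = 40., float64 rounds sigmoid(x) to exactly
# 1.0, so a naive log(1 - sigmoid(x)) is log(0.) == -inf, whereas the
# softplus rewrite log(sigmoid(x)) == -np.logaddexp(0, -x) (and its
# mirror for the 1 - sigmoid term) stays finite. Theano applies this
# rewrite automatically once log(sigmoid(...)) is visible as a single
# expression, which is why the pre-sigmoid activation is passed in.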
def test_rbm(learning_rate=0.1, training_epochs=15,
dataset='mnist.pkl.gz', batch_size=20,
n_chains=20, n_samples=10, output_folder='rbm_plots',
n_hidden=500, plot_every=1000):
"""
Demonstrate how to train and afterwards sample from it using Theano.
This is demonstrated on MNIST.
:param learning_rate: learning rate used for training the RBM
:param training_epochs: number of epochs used for training
:param dataset: path the the pickled dataset
:param batch_size: size of a batch used to train the RBM
:param n_chains: number of parallel Gibbs chains to be used for sampling
:param n_samples: number of samples to plot for each chain
"""
# NOTE: the keyword arguments are deliberately overridden below for
# this labeled-RBM experiment
learning_rate=0.1
training_epochs=30
dataset='mnist.pkl.gz'
batch_size=20
n_chains=20
n_samples=10
output_folder='rbm_label_plots'
n_hidden=500
plot_every=1000
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[2]
# Augment training data with class labels
x_array = train_set_x.get_value()
# train_set_y is a cast of a shared variable; owner.inputs[0] recovers
# the underlying shared variable that holds the integer labels
y_list = train_set_y.owner.inputs[0].get_value()
y_array = np.zeros((x_array.shape[0], 10))
for i in range(x_array.shape[0]):
y_array[i,y_list[i]] = 1
combined_array = np.hstack((x_array, y_array))
train_set_x.set_value(combined_array)
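# Illustration of the augmentation (hypothetical values): a flattened
# 28x28 digit plus a 10-way one-hot label gives a 794-dim visible vector
#     x = [0.0, ..., 0.9]        # 784 pixel intensities
#     y = [0,0,0,1,0,0,0,0,0,0]  # one-hot encoding of class 3
#     v = np.hstack((x, y))      # shape (794,)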
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
rng = numpy.random.RandomState(1234)
theano_rng = RandomStreams(rng.randint(2 ** 30))
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
dtype=theano.config.floatX),
borrow=True)
# construct the RBM class
rbm = RBM(input=x, n_visible=28 * 28 + 10,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
# get the cost and the gradient corresponding to one step of PCD-15
cost, updates = rbm.get_cost_updates(lr=learning_rate,
persistent=persistent_chain, k=15)
#################################
# Training the RBM #
#################################
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
os.chdir(output_folder)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
train_rbm = theano.function([index], cost,
updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]},
name='train_rbm')
plotting_time = 0.
start_time = time.clock()
plotting_start = time.clock()
# Construct image from the weight matrix
image = PIL.Image.fromarray(tile_raster_images(
X=rbm.W.get_value(borrow=True)[:784].T,
img_shape=(28, 28), tile_shape=(10, 10),
tile_spacing=(1, 1)))
image.save('random_filters.png') # snapshot of the initial (untrained) filters
plotting_stop = time.clock()
plotting_time += (plotting_stop - plotting_start)
# go through training epochs
for epoch in xrange(training_epochs):
# go through the training set
mean_cost = []
for batch_index in xrange(n_train_batches):
print 'batch %d of %d, epoch %d of %d' % (batch_index+1, n_train_batches, epoch+1, training_epochs)
mean_cost += [train_rbm(batch_index)]
print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)
# Plot filters after each training epoch
plotting_start = time.clock()
# Construct image from the weight matrix
image = PIL.Image.fromarray(tile_raster_images(
X=rbm.W.get_value(borrow=True)[:784].T,
img_shape=(28, 28), tile_shape=(10, 10),
tile_spacing=(1, 1)))
image.save('filters_at_epoch_%i.png' % epoch)
plotting_stop = time.clock()
plotting_time += (plotting_stop - plotting_start)
end_time = time.clock()
pretraining_time = (end_time - start_time) - plotting_time
print ('Training took %f minutes' % (pretraining_time / 60.))
# save the rbm
with open('rbm_saved.p', 'wb') as save_file: # binary mode for pickle
cPickle.dump(rbm, save_file)
# #################################
# # Sampling from the RBM #
# #################################
images = np.zeros((0,28*28))
labels = np.zeros((0,1))
n_chains = 1
plot_every = 1000
number_of_train_samples = train_set_x.get_value(borrow=True).shape[0]
count = 0
while True: # generate and save samples indefinitely; interrupt to stop
# pick random test examples, with which to initialize the persistent chain
train_idx = rng.randint(number_of_train_samples - n_chains)
starting_image = numpy.asarray(train_set_x.get_value(borrow=True)[train_idx:train_idx + n_chains])
vis = starting_image
for dummy in range(plot_every):
pre_sigmoid_activation = np.dot(vis, rbm.W.get_value()) + rbm.hbias.get_value()
hid_prob = 1 / (1 + np.exp(-pre_sigmoid_activation))
hid = (hid_prob > np.random.rand(hid_prob.shape[0], hid_prob.shape[1])) * 1
pre_sigmoid_activation = np.dot(hid, rbm.W.get_value().T) + rbm.vbias.get_value()
vis_prob = 1 / (1 + np.exp(-pre_sigmoid_activation))
vis = (vis_prob > np.random.rand(vis_prob.shape[0], vis_prob.shape[1])) * 1
# Clamp the 10 label units to the starting example's label so the
# chain samples from the class-conditional distribution p(image | label)
vis[0,-10:] = starting_image[0,-10:]
vis_image = vis_prob[0,0:(28*28)]
images = np.vstack((images, vis_image))
# np.where on the one-hot label tail recovers the integer class
labels = np.vstack((labels, np.where(starting_image[0,-10:])[0][0]))
# re-save the accumulated samples on every iteration (simple checkpointing)
np.savetxt('images.csv', images, delimiter=',')
np.savetxt('labels.csv', labels, delimiter=',')
count += 1
print 'Saved %d images' % count
os.chdir('../')
return rbm
def train_rbm(learning_rate=0.1, training_epochs=15,
dataset='mnist.pkl.gz', batch_size=20,
n_hidden=500, random_seed=1, augment_with_labels=True):
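"""Train an RBM and return it together with the (possibly
label-augmented) training and test sets. `dataset` may be a path to a
pickled dataset or a 4-tuple of shared variables."""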
if isinstance(dataset, str):
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[2]
else:
train_set_x, train_set_y, test_set_x, test_set_y = dataset
if augment_with_labels:
# Augment training data with class labels so that we can later sample from class conditional distributions
x_array = train_set_x.get_value()
# recover the shared variable behind the cast wrapping train_set_y
y_list = train_set_y.owner.inputs[0].get_value()
y_array = np.zeros((x_array.shape[0], 10))
for i in range(x_array.shape[0]):
y_array[i,y_list[i]] = 1
combined_array = np.hstack((x_array, y_array))
train_set_x.set_value(combined_array)
n_visible = train_set_x.get_value().shape[1]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
rng = numpy.random.RandomState(random_seed)
theano_rng = RandomStreams(rng.randint(2 ** 30))
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
dtype=theano.config.floatX),
borrow=True)
# construct the RBM class
#n_visible = 28 * 28 + 10 if augment_with_labels else 28 * 28
rbm = RBM(input=x, n_visible=n_visible,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
# get the cost and the gradient corresponding to one step of PCD-15
cost, updates = rbm.get_cost_updates(lr=learning_rate,
persistent=persistent_chain, k=15)
#################################
# Training the RBM #
#################################
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
train_rbm = theano.function([index], cost,
updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]},
name='train_rbm')
start_time = time.clock()
# go through training epochs
for epoch in xrange(training_epochs):
# go through the training set
mean_cost = []
for batch_index in xrange(n_train_batches):
print 'batch %d of %d, epoch %d of %d' % (batch_index+1, n_train_batches, epoch+1, training_epochs)
mean_cost += [train_rbm(batch_index)]
print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)
end_time = time.clock()
pretraining_time = (end_time - start_time)
print ('Training took %f minutes' % (pretraining_time / 60.))
return (rbm, train_set_x, train_set_y, test_set_x, test_set_y) | mit |
diegoguimaraes/django | django/contrib/gis/gdal/srs.py | 11 | 12004 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS input type "%s"' % type(srs_input).__name__)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbole.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
automatically determining whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
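# Illustrative usage sketch (hypothetical values, not part of this module):
#
#     wgs84 = SpatialReference(4326)  # geographic lat/long
#     texas = SpatialReference(32140) # NAD83 / Texas South Central
#     ct = CoordTransform(wgs84, texas)
#     # a GEOS/GDAL geometry can then be reprojected in place with
#     # geom.transform(ct)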
| bsd-3-clause |
jeanlinux/calibre | setup/pypi.py | 14 | 13151 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, StringIO, urllib2, urlparse, base64, hashlib, httplib, socket
from ConfigParser import ConfigParser
from setup import Command, __appname__, __version__
from setup.install import Sdist
class Metadata(object):
name = __appname__
version = __version__
author = 'Kovid Goyal'
author_email = '[email protected]'
url = 'http://calibre-ebook.com'
description = 'E-book management application.'
long_description = open('README.md', 'rb').read()
license = 'GPL'
keywords = ['e-book', 'ebook', 'news', 'reading', 'catalog', 'books']
platforms = ['Linux', 'Windows', 'OS X']
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Hardware :: Hardware Drivers'
]
class PyPIRC(object):
DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
RC = os.path.expanduser('~/.pypirc')
def read_pypirc(self):
repository = self.DEFAULT_REPOSITORY
realm = self.DEFAULT_REALM
config = ConfigParser()
config.read(self.RC)
sections = config.sections()
if 'distutils' in sections:
# let's get the list of servers
index_servers = config.get('distutils', 'index-servers')
_servers = [server.strip() for server in
index_servers.split('\n')
if server.strip() != '']
if _servers == []:
# nothing set, let's try to get the default pypi
if 'pypi' in sections:
_servers = ['pypi']
else:
# the file is not properly defined, returning
# an empty dict
return {}
for server in _servers:
current = {'server': server}
current['username'] = config.get(server, 'username')
current['password'] = config.get(server, 'password')
# optional params
for key, default in (('repository',
self.DEFAULT_REPOSITORY),
('realm', self.DEFAULT_REALM)):
if config.has_option(server, key):
current[key] = config.get(server, key)
else:
current[key] = default
if (current['server'] == repository or
current['repository'] == repository):
return current
elif 'server-login' in sections:
# old format
server = 'server-login'
if config.has_option(server, 'repository'):
repository = config.get(server, 'repository')
else:
repository = self.DEFAULT_REPOSITORY
return {'username': config.get(server, 'username'),
'password': config.get(server, 'password'),
'repository': repository,
'server': server,
'realm': self.DEFAULT_REALM}
return {}
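# For reference, a minimal ~/.pypirc in the 'distutils' format parsed
# above looks like this (illustrative values):
#
#     [distutils]
#     index-servers =
#         pypi
#
#     [pypi]
#     username: someuser
#     password: somepass
#     repository: http://pypi.python.org/pypi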
class PyPIRegister(Command):
description = 'Register distribution with PyPI'
def add_options(self, parser):
parser.add_option('--show-response', default=False, action='store_true',
help='Show server responses')
def run(self, opts):
self.show_response = opts.show_response
config = PyPIRC().read_pypirc()
self.repository = config['repository']
self.realm = config['realm']
#self.verify_metadata()
self.send_metadata(config['username'], config['password'])
def send_metadata(self, username, password):
auth = urllib2.HTTPPasswordMgr()
host = urlparse.urlparse(self.repository)[1]
auth.add_password(self.realm, host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
self.info('Server response (%s): %s' % (code, result))
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
print 'Server response (%s): %s'%(code, result)
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = Metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': Metadata.name,
'version': Metadata.version,
'summary': Metadata.description,
'home_page': Metadata.url,
'author': Metadata.author,
'author_email': Metadata.author_email,
'license': Metadata.license,
'description': Metadata.long_description,
'keywords': meta.keywords,
'platform': meta.platforms,
'classifiers': Metadata.classifiers,
'download_url': 'UNKNOWN',
# PEP 314
'provides': [],
'requires': [],
'obsoletes': [],
}
if data['provides'] or data['requires'] or data['obsoletes']:
data['metadata_version'] = '1.1'
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
self.info('Registering %s to %s' % (data['name'],
self.repository))
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, (list, tuple)):
value = [value]
for value in value:
value = unicode(value).encode("utf-8")
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
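# Each form field in the body now has the shape (illustrative):
#
#     --<boundary>
#     Content-Disposition: form-data; name="<key>"
#
#     <value>
#
# terminated by '--<boundary>--'.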
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib2.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib2.HTTPError, e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib2.URLError, e:
result = 500, str(e)
else:
if self.show_response:
data = result.read()
result = 200, 'OK'
if self.show_response:
print '-'*75, data, '-'*75
return result
class PyPIUpload(PyPIRegister):
description = 'Upload source distribution to PyPI'
sub_commands = ['sdist', 'pypi_register']
def add_options(self, parser):
pass
def run(self, opts):
self.show_response = opts.show_response
config = PyPIRC().read_pypirc()
self.repository = config['repository']
self.realm = config['realm']
self.username = config['username']
self.password = config['password']
self.upload_file('sdist', '', Sdist.DEST)
def upload_file(self, command, pyversion, filename):
# Sign if requested
#if self.sign:
# gpg_args = ["gpg", "--detach-sign", "-a", filename]
# if self.identity:
# gpg_args[2:2] = ["--local-user", self.identity]
# spawn(gpg_args,
# dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
content = open(filename,'rb').read()
meta = Metadata
md5 = hashlib.md5()
md5.update(content)
data = {
# action
':action': 'file_upload',
'protcol_version': '1', # (sic) legacy field name expected by distutils-era PyPI
# identify release
'name': meta.name,
'version': meta.version,
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5.hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.description,
'home_page': meta.url,
'author': meta.author,
'author_email': meta.author_email,
'license': meta.license,
'description': meta.long_description,
'keywords': meta.keywords,
'platform': meta.platforms,
'classifiers': meta.classifiers,
'download_url': 'UNKNOWN',
# PEP 314
'provides': [],
'requires': [],
'obsoletes': [],
}
comment = ''
data['comment'] = comment
#if self.sign:
# data['gpg_signature'] = (os.path.basename(filename) + ".asc",
# open(filename+".asc").read())
# set up the authentication
auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if type(value) is tuple:
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
value = str(value)
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.info("Submitting %s to %s" % (filename, self.repository))
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
http = httplib.HTTPConnection(netloc)
elif schema == 'https':
http = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema "+schema)
data = ''
try:
http.connect()
http.putrequest("POST", url)
http.putheader('Content-type',
'multipart/form-data; boundary=%s'%boundary)
http.putheader('Content-length', str(len(body)))
http.putheader('Authorization', auth)
http.endheaders()
http.send(body)
except socket.error, e:
self.warn(str(e))
raise SystemExit(1)
r = http.getresponse()
if r.status == 200:
self.info('Server response (%s): %s' % (r.status, r.reason))
else:
self.info('Upload failed (%s): %s' % (r.status, r.reason))
raise SystemExit(1)
if self.show_response:
print '-'*75, r.read(), '-'*75
| gpl-3.0 |
slavi104/cashtracker | cashtracker_project/app_cashtracker/tests/report_pdf_tests.py | 1 | 2438 | from django.test import TestCase
from app_cashtracker.helpers.ReportPDF import *
from app_cashtracker.models.User import User
from app_cashtracker.models.Category import Category
from app_cashtracker.models.Subcategory import Subcategory
from app_cashtracker.models.Payment import Payment
from app_cashtracker.models.Report import Report
from app_cashtracker.helpers.util import take_date
from django.utils import timezone
class ReportPDFTests(TestCase):
def test_create_report(self):
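"""Build a Report end-to-end and check the number of PDF elements
after each generation stage."""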
now = timezone.now()
user = User()
user.created = now
user.save()
category = Category()
category.name = 'Test category'
category.description = ''
category.user = user
category.save()
subcategory = Subcategory()
subcategory.name = ''
subcategory.category = category
subcategory.save()
payment = Payment()
payment.value = 10
payment.currency = 'EUR'
payment.category = category
payment.subcategory = subcategory
payment.comment = ''
payment.date_time = now
payment.user = user
payment.save()
report = Report()
report.user = user
report.created = now.strftime('%Y-%m-%d %H:%M:%S')
report.report_type = 'today'
report.report_date = now.strftime('%Y-%m-%d %H:%M:%S')
report.start_date = take_date('today')
report.end_date = now.strftime('%Y-%m-%d %H:%M:%S')
report.currency = 'BGN'
report.is_active = 1
report.save()
report.url = 'app_cashtracker/reports/{}.pdf'.format(report)
report.save()
report_pdf = ReportPDF(report)
self.assertEqual(len(report_pdf.elements), 0)
report_pdf.generate_header()
self.assertEqual(len(report_pdf.elements), 5)
report_pdf.generate_statistics_data_and_table()
self.assertEqual(len(report_pdf.elements), 8)
report_pdf.generate_pie_charts()
self.assertEqual(len(report_pdf.elements), 9)
if len(report_pdf.lc_data) != 0:
report_pdf.generate_line_charts()
self.assertEqual(len(report_pdf.elements), 10)
report_pdf.build_and_save()
self.assertEqual(ReportPDF.payments_table_labels, [
'Date',
'Name',
'Category',
'Subcategory',
'Comment',
'Value'
])
| lgpl-3.0 |
ruymanengithub/vison | vison/datamodel/QLAtools.py | 1 | 10255 | # -*- coding: utf-8 -*-
"""
Quick-Look-Analysis Tools.
:History:
Created on Wed Mar 16 11:31:58 2016
@author: Ruyman Azzollini
"""
# IMPORT STUFF
import numpy as np
import os
from matplotlib import pyplot as plt
import string as st
from vison.datamodel import ccd
from vison.support import latex
from pdb import set_trace as stop
# END IMPORT
isthere = os.path.exists
QUADS = ['E', 'F', 'G', 'H']
latex_templates = {'table_stats':
['\\begin{center}',
'\\resizebox{0.3\\textwidth}{!}{',
'\\begin{tabular}{|c|c|c|c|c|c|c|c|}',
'\hline',
'\multicolumn{8}{c}{%s}\\\\',
'\hline',
'section & mean & std & min & max & p25 & p50 & p75\\\\',
'\hline',
'prescan & %.2f & %.3f & %.2f & %.2f & %.2f & %.2f & %.2f\\\\',
'image & %.2f & %.3f & %.2f & %.2f & %.2f & %.2f & %.2f\\\\',
'overscan & %.2f & %.3f & %.2f & %.2f & %.2f & %.2f & %.2f\\\\',
'\hline',
'\end{tabular}}',
'\end{center}']}
def getacrosscolscut(CCDobj):
""" """
result = {}
for iQ, QUAD in enumerate(QUADS):
data = CCDobj.get_quad(QUAD).copy()
ncols = data.shape[0]
stats = np.zeros((2, ncols))
for icol in range(ncols):
col = data[icol, :]
stats[:, icol] = (np.mean(col), np.std(col))
result[QUAD] = {}
result[QUAD] = stats.copy()
return result
def getacrossrowscut(CCDobj):
""" """
result = {}
for iQ, QUAD in enumerate(QUADS):
prestart, preend, imgstart, imgend, ovstart, ovend = CCDobj.getsectioncollims(
QUAD)
prestart += 3
preend -= 3
imgstart += 3
imgend -= 3
ovstart += 3
ovend -= 3
data = CCDobj.get_quad(QUAD).copy()
nrows = data.shape[1]
prestats = np.zeros((2, nrows))
ovstats = np.zeros((2, nrows))
imgstats = np.zeros((2, nrows))
for irow in range(nrows):
prescan = data[prestart:preend, irow]
ovscan = data[ovstart:ovend, irow]
imgline = data[imgstart:imgend, irow]
prestats[:, irow] = (np.mean(prescan), np.std(prescan))
ovstats[:, irow] = (np.mean(ovscan), np.std(ovscan))
imgstats[:, irow] = (np.mean(imgline), np.std(imgline))
result[QUAD] = {}
result[QUAD]['prescan'] = prestats.copy()
result[QUAD]['overscan'] = ovstats.copy()
result[QUAD]['image'] = imgstats.copy()
return result
def getsectionstats(CCDobj, QUAD, section, xbuffer=(0, 0), ybuffer=(0, 0)):
""" """
prestart, preend, imgstart, imgend, ovstart, ovend = CCDobj.getsectioncollims(
QUAD)
if section == 'prescan':
x0, x1 = prestart, preend
elif section == 'overscan':
x0, x1 = ovstart, ovend
elif section == 'image':
x0, x1 = imgstart, imgend
# apply the x-buffer once, uniformly, to whichever section was selected
x0 += xbuffer[0]
x1 -= xbuffer[1]
quaddata = CCDobj.get_quad(QUAD)
NX, NY = quaddata.shape
y0, y1 = (0, NY - 1)
y0 += ybuffer[0]
y1 -= ybuffer[1]
subquad = quaddata[x0:x1, y0:y1]
stats = [
np.mean(subquad), np.std(subquad), np.min(subquad), np.max(subquad), np.percentile(
subquad, q=25), np.percentile(
subquad, q=50), np.percentile(
subquad, q=75)]
return stats
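# stats ordering: (mean, std, min, max, p25, p50, p75) -- this is the
# order assumed by the 'table_stats' LaTeX template above.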
def plotQuads(CCDobj, filename=None, suptitle=''):
""" """
figP = plt.figure(figsize=(6, 6))
figP.suptitle(suptitle)
axP = []
pQUADS = ['E', 'F', 'H', 'G']
for iQ, QUAD in enumerate(pQUADS):
data = CCDobj.get_quad(QUAD).copy()
ixnozero = np.where(data != 0.)
if len(ixnozero[0]) != 0:
minP = np.percentile(data[ixnozero], q=30)
maxP = np.percentile(data[ixnozero], q=70)
else:
minP = 0
maxP = 0
axP.append(figP.add_subplot(2, 2, iQ + 1))
axP[-1].imshow(data.transpose(), origin='lower', cmap='hot',
clim=(minP, maxP))
axP[-1].set_title(QUAD)
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close('all')
def plotAcROWcuts(dissection, filename=None, suptitle=''):
""" """
figP = plt.figure(figsize=(6, 6))
figP.suptitle(suptitle)
axP = []
pQUADS = ['E', 'F', 'H', 'G']
for iQ, QUAD in enumerate(pQUADS):
data = dissection[QUAD].copy()
ncols = data.shape[1]
col = np.arange(ncols) + 1
meanval = data[0, :].copy()
axP.append(figP.add_subplot(2, 2, iQ + 1))
axP[-1].plot(col, meanval)
axP[-1].set_title(QUAD)
if QUAD in ['E', 'H']:
axP[-1].set_ylabel('ADU')
if QUAD in ['H', 'G']:
axP[-1].set_xlabel('col')
axP[-1].ticklabel_format(style='sci', axis='x', scilimits=(2, 2))
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close('all')
def plotAcCOLcuts(dissection, filename=None, suptitle=''):
""" """
figP = plt.figure(figsize=(6, 6))
figP.suptitle(suptitle)
axP = []
pQUADS = ['E', 'F', 'H', 'G']
colors = ['b', 'g', 'r']
for iQ, QUAD in enumerate(pQUADS):
data = dissection[QUAD]
axP.append(figP.add_subplot(2, 2, iQ + 1))
for ia, area in enumerate(['prescan', 'image', 'overscan']):
nrows = data[area].shape[1]
row = np.arange(nrows) + 1
meanval = data[area][0, :].copy()
axP[-1].plot(row, meanval, color=colors[ia])
axP[-1].set_title(QUAD)
if QUAD in ['E', 'H']:
axP[-1].set_ylabel('ADU')
if QUAD in ['H', 'G']:
axP[-1].set_xlabel('row')
axP[-1].ticklabel_format(style='sci', axis='x', scilimits=(2, 2))
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close('all')
def dissectFITS(FITSfile, path=''):
""" """
diss = {}
CCDobj = ccd.CCD(FITSfile)
# Header selection
sel_header_keys = ['EXTNAME', 'PROGRAM', 'OBJECT', 'OBSID', 'DATE', 'EXPTIME',
'CHRG_INJ', 'TRAPPUMP', 'VSTART', 'VEND', 'WAVELENG',
'CCD_SN', 'ROE_SN', 'FPGA_VER', 'TOI_FLU', 'TOI_PUMP',
'TOI_READ', 'INVFLSHP', 'INVFLUSH', 'FLUSHES', 'R1CCD2TT',
'R1CCD2TB', 'IDL_V', 'IDH_V', 'IG1_V', 'IG2_V',
'ODCCD2_V', 'RDCCD2_V']
diss['subheader'] = []
for key in sel_header_keys:
diss['subheader'].append('%s=%s' % (key, CCDobj.header[key]))
OBSID = CCDobj.header['OBSID']
DATE = CCDobj.header['DATE']
rootfname = '%s_%s' % (OBSID, DATE)
xbuffer = (3, 3)
ybuffer = (3, 3)
# STATISTICS : mean, std, min, max, p25, p50, p75
diss['imgstats'] = {}
diss['prestats'] = {}
diss['overstats'] = {}
for iQ, QUAD in enumerate(QUADS):
diss['imgstats'][QUAD] = getsectionstats(
CCDobj, QUAD, 'image', xbuffer=xbuffer, ybuffer=ybuffer)
diss['prestats'][QUAD] = getsectionstats(
CCDobj, QUAD, 'prescan', xbuffer=xbuffer, ybuffer=ybuffer)
diss['overstats'][QUAD] = getsectionstats(
CCDobj, QUAD, 'overscan', xbuffer=xbuffer, ybuffer=ybuffer)
# Across Columns profiles of mean and std
diss['acrosscolscut'] = getacrosscolscut(CCDobj)
accolfname = '%s_ACCOL.eps' % (rootfname,)
accolfname = os.path.join(path, accolfname)
plotAcROWcuts(diss['acrosscolscut'], filename=accolfname, suptitle=OBSID)
# Across Rows profiles of mean and std
diss['acrossrowscut'] = getacrossrowscut(CCDobj)
acrowfname = '%s_ACROW.eps' % (rootfname,)
acrowfname = os.path.join(path, acrowfname)
plotAcCOLcuts(diss['acrossrowscut'], filename=acrowfname, suptitle=OBSID)
# Quadrant Stamps
quadsfname = '%s_STAMP.eps' % rootfname
quadsfname = os.path.join(path, quadsfname)
plotQuads(CCDobj, filename=quadsfname, suptitle=OBSID)
diss['STAMP'] = quadsfname
diss['COLS'] = accolfname
diss['ROWS'] = acrowfname
return diss
def reportFITS(FITSfile, outpath=''):
""" """
assert isthere(FITSfile)
bareimg = os.path.split(FITSfile)[-1]
PDFroot = '%s' % os.path.splitext(bareimg)[0]
dissection = dissectFITS(FITSfile, path=outpath)
report = latex.LaTeX(fontsize=10)
figlist = []
niceFITSname = bareimg.replace('_', '\\_')
report.body.append('%s\\\\' % niceFITSname)
report.body.append('\n')
report.body += ['\\begin{multicols}{2}']
report.body.append('\\begin{verbatim}')
for line in dissection['subheader']:
#niceline = line.replace('_','\_')
#report.body.append('%s\\\\' % niceline)
report.body.append(line)
report.body.append('\\end{verbatim}')
# report.body.append('\\newline')
for QUAD in QUADS:
prestats = dissection['prestats'][QUAD]
imgstats = dissection['imgstats'][QUAD]
overstats = dissection['overstats'][QUAD]
alignedstats = tuple([QUAD] + prestats + imgstats + overstats)
template_stats = '__^__'.join(latex_templates['table_stats'])
table_stats = template_stats % alignedstats
table_stats = table_stats.split('__^__')
report.body += table_stats
# getacrosscolscut(CCDobj,QUAD)
# getacrossrowscut(CCDobj,QUAD)
report.body.append('\columnbreak')
report.addfigtobody(dissection['STAMP'], imgsize=7)
report.addfigtobody(dissection['COLS'], imgsize=7)
report.addfigtobody(dissection['ROWS'], imgsize=7)
figlist += [dissection['STAMP'], dissection['COLS'], dissection['ROWS']]
report.body.append('\end{multicols}')
report.Write('%s.tex' % PDFroot)
report.Compile2PDF('%s.tex' % PDFroot, cleanafter=True, figures=figlist)
os.system('mv %s.pdf %s/' % (PDFroot, outpath))
PDF = os.path.join(outpath, '%s.pdf' % PDFroot)
return PDF
| gpl-3.0 |
manojpandey/hackenvision16 | tinybank/tinybank/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py | 199 | 2167 | from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers):
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param headers: Headers to verify.
:type headers: `httplib.HTTPMessage`.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError('expected httplib.Message, got {0}.'.format(
type(headers)))
defects = getattr(headers, 'defects', None)
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
if get_payload: # Platform-specific: Python 3.
unparsed_data = get_payload()
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
"""
Checks, wether a the request of a response has been a HEAD-request.
Handles the quirks of AppEngine.
:param conn:
:type conn: :class:`httplib.HTTPResponse`
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method = response._method
if isinstance(method, int): # Platform-specific: Appengine
return method == 3
return method.upper() == 'HEAD'
| mit |
home-assistant/home-assistant | tests/components/met_eireann/test_config_flow.py | 3 | 3022 | """Tests for Met Éireann config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.met_eireann.const import DOMAIN, HOME_LOCATION_NAME
from homeassistant.const import CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE
@pytest.fixture(name="met_eireann_setup", autouse=True)
def met_setup_fixture():
"""Patch Met Éireann setup entry."""
with patch(
"homeassistant.components.met_eireann.async_setup_entry", return_value=True
):
yield
async def test_show_config_form(hass):
"""Test show configuration form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == config_entries.SOURCE_USER
async def test_flow_with_home_location(hass):
"""Test config flow.
Test the flow when a default location is configured.
Then it should return a form with default values.
"""
hass.config.latitude = 1
hass.config.longitude = 2
hass.config.elevation = 3
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == config_entries.SOURCE_USER
default_data = result["data_schema"]({})
assert default_data["name"] == HOME_LOCATION_NAME
assert default_data["latitude"] == 1
assert default_data["longitude"] == 2
assert default_data["elevation"] == 3
async def test_create_entry(hass):
"""Test create entry from user input."""
test_data = {
"name": "test",
CONF_LONGITUDE: 0,
CONF_LATITUDE: 0,
CONF_ELEVATION: 0,
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=test_data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == test_data.get("name")
assert result["data"] == test_data
async def test_flow_entry_already_exists(hass):
"""Test user input for config_entry that already exists.
Test to ensure the config form does not allow duplicate entries.
"""
test_data = {
"name": "test",
CONF_LONGITUDE: 0,
CONF_LATITUDE: 0,
CONF_ELEVATION: 0,
}
# Create the first entry and assert that it is created successfully
result1 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=test_data
)
assert result1["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# Create the second entry and assert that it is aborted
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=test_data
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
| apache-2.0 |
holiman/evmlab | netsstore.py | 1 | 1143 | """
"""
#!/usr/bin/env python
import json
import tempfile, os
from evmlab import compiler as c
from evmlab import vm
from evmlab import genesis
def generateCall():
# the caller
p = c.Program()
# create2(self,value = 0,instart = 0, insize = 0, salt = 0):
p.op(c.ADDRESS)
p.create2(0,0,0,0)
# note: create2 above is invoked with empty init code (insize=0), so no callee body runs
return p.bytecode()
def main():
g = genesis.Genesis()
g.setConfigConstantinople()
bytecode = generateCall()
#print("code:", bytecode)
# g.addPrestateAccount({'address':'0x0000000000000000000000000000000000000000', 'code': '0x'+bytecode, 'balance':"0x00",'nonce':"0x01"})
(geth_g, parity_g) = g.export()
print(parity_g)
print(geth_g)
geth = vm.GethVM("/home/martin/go/src/github.com/ethereum/go-ethereum/build/bin/evm")
g_out = geth.execute(code = bytecode, receiver="0x0000000000000000000000000000000000000000", genesis = geth_g, json=True, gas=100000, memory=False)
#print(geth.lastCommand)
print("")
l = len(g_out)
for i in range(0,l):
print(vm.toText(json.loads(g_out[i])))
if __name__ == '__main__':
main() | gpl-3.0 |
zlorf/django-dbsettings | dbsettings/forms.py | 2 | 2516 | import re
from collections import OrderedDict
from django.apps import apps
from django import forms
from django.utils.text import capfirst
from dbsettings.loading import get_setting_storage
RE_FIELD_NAME = re.compile(r'^(.+)__(.*)__(.+)$')
class SettingsEditor(forms.BaseForm):
"Base editor, from which customized forms are created"
def __iter__(self):
for field in super(SettingsEditor, self).__iter__():
yield self.specialize(field)
def __getitem__(self, name):
field = super(SettingsEditor, self).__getitem__(name)
return self.specialize(field)
def specialize(self, field):
"Wrapper to add module_name and class_name for regrouping"
field.label = capfirst(field.label)
module_name, class_name, _ = RE_FIELD_NAME.match(field.name).groups()
app_label = self.apps[field.name]
field.module_name = app_label
if class_name:
model = apps.get_model(app_label, class_name)
if model:
class_name = model._meta.verbose_name
field.class_name = class_name
field.verbose_name = self.verbose_names[field.name]
return field
def customized_editor(user, settings):
"Customize the setting editor based on the current user and setting list"
base_fields = OrderedDict()
verbose_names = {}
apps = {}
for setting in settings:
perm = '%s.can_edit_%s_settings' % (
setting.app,
setting.class_name.lower()
)
if user.has_perm(perm):
# Add the field to the customized field list
storage = get_setting_storage(*setting.key)
kwargs = {
'label': setting.description,
'help_text': setting.help_text,
# Provide current setting values for initializing the form
'initial': setting.to_editor(storage.value),
'required': setting.required,
'widget': setting.widget,
}
if setting.choices:
field = forms.ChoiceField(choices=setting.choices, **kwargs)
else:
field = setting.field(**kwargs)
key = '%s__%s__%s' % setting.key
apps[key] = setting.app
base_fields[key] = field
verbose_names[key] = setting.verbose_name
attrs = {'base_fields': base_fields, 'verbose_names': verbose_names, 'apps': apps}
return type('SettingsEditor', (SettingsEditor,), attrs)
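# Illustrative usage sketch (hypothetical view code, not part of this
# module):
#
#     EditorForm = customized_editor(request.user, settings)
#     form = EditorForm(request.POST or None)
#     if form.is_valid():
#         ...  # write form.cleaned_data back to the setting storage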
| bsd-3-clause |
lordmuffin/aws-cfn-plex | functions/credstash/pip/_vendor/requests/compat.py | 327 | 1687 | # -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
# try:
# import simplejson as json
# except (ImportError, SyntaxError):
# # simplejson does not support Python 3.2, it throws a SyntaxError
# # because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| mit |
fyffyt/pylearn2 | pylearn2/scripts/plot_monitor.py | 37 | 10204 | #!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
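# Illustrative behaviour with the default min_size (hypothetical names):
#
#     >>> unique_substrings(['run_a!', 'run_b!'])
#     ['a', 'b']
#
# i.e. each string is reduced to its shortest substring that occurs in
# no other string; main() uses this to abbreviate model names.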
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
print(arg + " is a yaml config file, " +
"you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
if len(model_paths) > 1:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
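# 'batche' is deliberate: the x-axis label below is built as
# '# ' + x_axis + 's', which then reads '# batches'.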
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
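# 7 colors x 3 line styles = 21 distinct plot styles before repeating;
# styles[idx % len(styles)] cycles through them below.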
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
print(channel_name + ' contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
| bsd-3-clause |
mrgloom/menpofit | menpofit/atm/fitter.py | 1 | 14525 | from __future__ import division
from menpofit.fitter import MultilevelFitter
from menpofit.fittingresult import AMMultilevelFittingResult
from menpofit.transform import (ModelDrivenTransform, OrthoMDTransform,
DifferentiableAlignmentSimilarity)
from menpofit.lucaskanade.residual import SSD, GaborFourier
from menpofit.lucaskanade.image import IC
from menpofit.base import name_of_callable
class ATMFitter(MultilevelFitter):
r"""
Abstract Interface for defining Active Template Models Fitters.
Parameters
-----------
atm : :map:`ATM`
The Active Template Model to be used.
"""
def __init__(self, atm):
self.atm = atm
@property
def reference_shape(self):
r"""
The reference shape of the ATM.
:type: :map:`PointCloud`
"""
return self.atm.reference_shape
@property
def features(self):
r"""
The feature extracted at each pyramidal level during ATM building.
Stored in ascending pyramidal order.
:type: `list`
"""
return self.atm.features
@property
def n_levels(self):
r"""
The number of pyramidal levels used during ATM building.
:type: `int`
"""
return self.atm.n_levels
@property
def downscale(self):
r"""
The downscale used to generate the final scale factor applied at
each pyramidal level during ATM building.
The scale factor is computed as:
``(downscale ** k) for k in range(n_levels)``
:type: `float`
"""
return self.atm.downscale
def _create_fitting_result(self, image, fitting_results, affine_correction,
gt_shape=None):
r"""
Creates a :map:`ATMMultilevelFittingResult` associated to a
particular fitting of the ATM fitter.
Parameters
-----------
image : :map:`Image` or subclass
The image to be fitted.
fitting_results : `list` of :map:`FittingResult`
A list of fitting result objects containing the state of the
the fitting for each pyramidal level.
affine_correction : :map:`Affine`
An affine transform that maps the result of the top resolution
level to the scale space of the original image.
gt_shape : :map:`PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting : :map:`ATMMultilevelFittingResult`
A fitting result object that will hold the state of the ATM
fitter for a particular fitting.
"""
return ATMMultilevelFittingResult(
image, self, fitting_results, affine_correction, gt_shape=gt_shape)
class LucasKanadeATMFitter(ATMFitter):
r"""
Lucas-Kanade based :map:`Fitter` for Active Template Models.
Parameters
-----------
atm : :map:`ATM`
The Active Template Model to be used.
algorithm : subclass of :map:`ImageLucasKanade`, optional
The Image Lucas-Kanade class to be used.
md_transform : :map:`ModelDrivenTransform` or subclass, optional
The model driven transform class to be used.
n_shape : `int` ``> 1``, ``0. <=`` `float` ``<= 1.``, `list` of the
previous or ``None``, optional
The number of shape components or amount of shape variance to be
used per pyramidal level.
If `None`, all available shape components ``(n_active_components)``
will be used.
If `int` ``> 1``, the specified number of shape components will be
used.
If ``0. <=`` `float` ``<= 1.``, the number of components capturing the
specified variance ratio will be computed and used.
If `list` of length ``n_levels``, then the number of components is
defined per level. The first element of the list corresponds to the
lowest pyramidal level and so on.
If not a `list` or a `list` of length 1, then the specified number of
components will be used for all levels.
"""
def __init__(self, atm, algorithm=IC, residual=SSD,
md_transform=OrthoMDTransform, n_shape=None, **kwargs):
super(LucasKanadeATMFitter, self).__init__(atm)
self._set_up(algorithm=algorithm, residual=residual,
md_transform=md_transform, n_shape=n_shape, **kwargs)
@property
def algorithm(self):
r"""
Returns a string containing the name of fitting algorithm.
:type: `str`
"""
return 'LK-ATM-' + self._fitters[0].algorithm
def _set_up(self, algorithm=IC,
residual=SSD, md_transform=OrthoMDTransform,
global_transform=DifferentiableAlignmentSimilarity,
n_shape=None, **kwargs):
r"""
Sets up the Lucas-Kanade fitter object.
Parameters
-----------
algorithm : subclass of :map:`ImageLucasKanade`, optional
The Image Lucas-Kanade class to be used.
md_transform : :map:`ModelDrivenTransform` or subclass, optional
The model driven transform class to be used.
n_shape : `int` ``> 1``, ``0. <=`` `float` ``<= 1.``, `list` of the
previous or ``None``, optional
The number of shape components or amount of shape variance to be
used per pyramidal level.
If `None`, all available shape components ``(n_active_components)``
will be used.
If `int` ``> 1``, the specified number of shape components will be
used.
If ``0. <=`` `float` ``<= 1.``, the number of components capturing
the specified variance ratio will be computed and used.
If `list` of length ``n_levels``, then the number of components is
defined per level. The first element of the list corresponds to the
lowest pyramidal level and so on.
If not a `list` or a `list` of length 1, then the specified number
of components will be used for all levels.
Raises
-------
ValueError
``n_shape`` can be an `int`, `float`, ``None`` or a `list`
containing ``1`` or ``n_levels`` of those.
"""
# check n_shape parameter
if n_shape is not None:
if type(n_shape) is int or type(n_shape) is float:
for sm in self.atm.shape_models:
sm.n_active_components = n_shape
elif len(n_shape) == 1 and self.atm.n_levels > 1:
for sm in self.atm.shape_models:
sm.n_active_components = n_shape[0]
elif len(n_shape) == self.atm.n_levels:
for sm, n in zip(self.atm.shape_models, n_shape):
sm.n_active_components = n
else:
raise ValueError('n_shape can be an integer or a float or None '
'or a list containing 1 or {} of '
'those'.format(self.atm.n_levels))
self._fitters = []
for j, (t, sm) in enumerate(zip(self.atm.warped_templates,
self.atm.shape_models)):
if md_transform is not ModelDrivenTransform:
md_trans = md_transform(
sm, self.atm.transform, global_transform,
source=t.landmarks['source'].lms)
else:
md_trans = md_transform(
sm, self.atm.transform,
source=t.landmarks['source'].lms)
if residual is not GaborFourier:
self._fitters.append(
algorithm(t, residual(), md_trans, **kwargs))
else:
self._fitters.append(
algorithm(t, residual(t.shape), md_trans,
**kwargs))
def __str__(self):
out = "{0} Fitter\n" \
" - Lucas-Kanade {1}\n" \
" - Transform is {2} and residual is {3}.\n" \
" - {4} training images.\n".format(
self.atm._str_title, self._fitters[0].algorithm,
self._fitters[0].transform.__class__.__name__,
self._fitters[0].residual.type, self.atm.n_training_shapes)
# small strings about number of channels, channels string and downscale
n_channels = []
down_str = []
for j in range(self.n_levels):
n_channels.append(
self._fitters[j].template.n_channels)
if j == self.n_levels - 1:
down_str.append('(no downscale)')
else:
down_str.append('(downscale by {})'.format(
self.downscale**(self.n_levels - j - 1)))
# string about features and channels
if self.pyramid_on_features:
feat_str = "- Feature is {} with ".format(name_of_callable(
self.features))
if n_channels[0] == 1:
ch_str = ["channel"]
else:
ch_str = ["channels"]
else:
feat_str = []
ch_str = []
for j in range(self.n_levels):
if isinstance(self.features[j], str):
feat_str.append("- Feature is {} with ".format(
self.features[j]))
elif self.features[j] is None:
feat_str.append("- No features extracted. ")
else:
feat_str.append("- Feature is {} with ".format(
self.features[j].__name__))
if n_channels[j] == 1:
ch_str.append("channel")
else:
ch_str.append("channels")
if self.n_levels > 1:
if self.atm.scaled_shape_models:
out = "{} - Gaussian pyramid with {} levels and downscale " \
"factor of {}.\n - Each level has a scaled shape " \
"model (reference frame).\n".format(out, self.n_levels,
self.downscale)
else:
out = "{} - Gaussian pyramid with {} levels and downscale " \
"factor of {}:\n - Shape models (reference frames) " \
"are not scaled.\n".format(out, self.n_levels,
self.downscale)
if self.pyramid_on_features:
out = "{} - Pyramid was applied on feature space.\n " \
"{}{} {} per image.\n".format(out, feat_str,
n_channels[0], ch_str[0])
if not self.atm.scaled_shape_models:
out = "{} - Reference frames of length {} " \
"({} x {}C, {} x {}C)\n".format(
out,
self._fitters[0].template.n_true_pixels() *
n_channels[0],
self._fitters[0].template.n_true_pixels(),
n_channels[0], self._fitters[0].template._str_shape,
n_channels[0])
else:
out = "{} - Features were extracted at each pyramid " \
"level.\n".format(out)
for i in range(self.n_levels - 1, -1, -1):
out = "{} - Level {} {}: \n".format(out, self.n_levels - i,
down_str[i])
if not self.pyramid_on_features:
out = "{} {}{} {} per image.\n".format(
out, feat_str[i], n_channels[i], ch_str[i])
if (self.atm.scaled_shape_models or
(not self.pyramid_on_features)):
out = "{} - Reference frame of length {} " \
"({} x {}C, {} x {}C)\n".format(
out,
self._fitters[i].template.n_true_pixels() *
n_channels[i],
self._fitters[i].template.n_true_pixels(),
n_channels[i], self._fitters[i].template._str_shape,
n_channels[i])
out = "{0} - {1} motion components\n\n".format(
out, self._fitters[i].transform.n_parameters)
else:
if self.pyramid_on_features:
feat_str = [feat_str]
out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \
" - Reference frame of length {4} ({5} x {6}C, " \
"{7} x {8}C)\n - {9} motion parameters\n".format(
out, feat_str[0], n_channels[0], ch_str[0],
self._fitters[0].template.n_true_pixels() * n_channels[0],
self._fitters[0].template.n_true_pixels(),
n_channels[0], self._fitters[0].template._str_shape,
n_channels[0], self._fitters[0].transform.n_parameters)
return out
class ATMMultilevelFittingResult(AMMultilevelFittingResult):
r"""
Class that holds the state of a :map:`ATMFitter` object before,
during and after it has fitted a particular image.
"""
@property
def atm_reconstructions(self):
r"""
The list containing the atm reconstruction (i.e. the template warped on
the shape instance reconstruction) obtained at each fitting iteration.
Note that this reconstruction is only tested to work for the
:map:`OrthoMDTransform`.
:type: `list` of :map:`Image` or subclass
"""
atm_reconstructions = []
for level, f in enumerate(self.fitting_results):
for shape_w in f.parameters:
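# The first 4 parameters are assumed to be those of the global
# similarity transform (as in OrthoMDTransform); the remainder
# are the shape model weights.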
shape_w = shape_w[4:]
sm_level = self.fitter.atm.shape_models[level]
swt = shape_w / sm_level.eigenvalues[:len(shape_w)] ** 0.5
atm_reconstructions.append(self.fitter.atm.instance(
shape_weights=swt, level=level))
return atm_reconstructions
| bsd-3-clause |
alextsui05/gmock | gtest/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
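# For example, RunWithSharding(2, 0, None) runs shard 0 of 2 with no extra
# flags and returns a (tests_run, exit_code) tuple.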
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
almarklein/scikit-image | skimage/io/_plugins/fits_plugin.py | 2 | 4636 | __all__ = ['imread', 'imread_collection']
import skimage.io as io
try:
import pyfits
except ImportError:
raise ImportError("PyFITS could not be found. Please refer to\n"
"http://www.stsci.edu/resources/software_hardware/pyfits\n"
"for further instructions.")
def imread(fname, dtype=None):
"""Load an image from a FITS file.
Parameters
----------
fname : string
Image file name, e.g. ``test.fits``.
dtype : dtype, optional
For FITS, this argument is ignored because Stefan is planning on
removing the dtype argument from imread anyway.
Returns
-------
img_array : ndarray
Unlike plugins such as PIL, where different colour bands/channels are
stored in the third dimension, FITS images are greyscale-only and can
be N-dimensional, so an array of the native FITS dimensionality is
returned, without colour channels.
Currently, if no image is found in the file, ``None`` is returned.
Notes
-----
Currently FITS ``imread()`` always returns the first image extension when
given a Multi-Extension FITS file; use ``imread_collection()`` (which does
lazy loading) to get all the extensions at once.
"""
hdulist = pyfits.open(fname)
# Iterate over FITS image extensions, ignoring any other extension types
# such as binary tables, and get the first image data array:
img_array = None
for hdu in hdulist:
if isinstance(hdu, pyfits.ImageHDU) or \
isinstance(hdu, pyfits.PrimaryHDU):
if hdu.data is not None:
img_array = hdu.data
break
hdulist.close()
return img_array
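# Minimal usage sketch (hypothetical filename): read the first image
# extension of a FITS file as an ndarray.
# >>> img = imread('example.fits')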
def imread_collection(load_pattern, conserve_memory=True):
"""Load a collection of images from one or more FITS files
Parameters
----------
load_pattern : str or list
List of extensions to load. Filename globbing is currently
unsupported.
conserve_memory : bool
If True, never keep more than one image in memory at a given
time. Otherwise, images will be cached once they are loaded.
Returns
-------
ic : ImageCollection
Collection of images.
"""
intype = type(load_pattern)
if intype is not list and intype is not str:
raise TypeError("Input must be a filename or list of filenames")
# Ensure we have a list, otherwise we'll end up iterating over the string:
if intype is not list:
load_pattern = [load_pattern]
# Generate a list of filename/extension pairs by opening the list of
# files and finding the image extensions in each one:
ext_list = []
for filename in load_pattern:
hdulist = pyfits.open(filename)
for n, hdu in zip(range(len(hdulist)), hdulist):
if isinstance(hdu, pyfits.ImageHDU) or \
isinstance(hdu, pyfits.PrimaryHDU):
# Ignore (primary) header units with no data (use '.size'
# rather than '.data' to avoid actually loading the image):
try:
data_size = hdu.size()
except TypeError: # (size changed to int in PyFITS 3.1)
data_size = hdu.size
if data_size > 0:
ext_list.append((filename, n))
hdulist.close()
return io.ImageCollection(ext_list, load_func=FITSFactory,
conserve_memory=conserve_memory)
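# For example (hypothetical filenames), all image extensions of two
# multi-extension FITS files can be loaded lazily as one collection:
# >>> ic = imread_collection(['a.fits', 'b.fits'])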
def FITSFactory(image_ext):
"""Load an image extension from a FITS file and return a NumPy array
Parameters
----------
image_ext : tuple
FITS extension to load, in the format ``(filename, ext_num)``.
The FITS ``(extname, extver)`` format is unsupported, since this
function is not called directly by the user and
``imread_collection()`` does the work of figuring out which
extensions need loading.
"""
# Expect a length-2 tuple with a filename as the first element:
if not isinstance(image_ext, tuple):
raise TypeError("Expected a tuple")
if len(image_ext) != 2:
raise ValueError("Expected a tuple of length 2")
filename = image_ext[0]
extnum = image_ext[1]
if type(filename) is not str or type(extnum) is not int:
raise ValueError("Expected a (filename, extension) tuple")
hdulist = pyfits.open(filename)
data = hdulist[extnum].data
hdulist.close()
if data is None:
raise RuntimeError("Extension %d of %s has no data" %
(extnum, filename))
return data
| bsd-3-clause |
Mageluer/computational_physics_N2014301040052 | final/code/perceptron_model/flowers_1v1.py | 1 | 1855 | import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.colors as mcl
from sklearn import datasets
import SVM1v1 as pc
"""
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
y[:20]=0
y[50:70]=2
X = df.iloc[0:100, [0, 2]].values
"""
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target[:]
"""
pl.scatter(X[:50,0],X[:50,1],color='red',marker='o',label='setosa')
pl.scatter(X[50:100,0],X[50:100,1],color='blue',marker='x',label='versicolor')
pl.xlabel('petal length')
pl.ylabel('sepal length')
pl.legend(loc='upper left')
pl.show()
"""
ppn = pc.SVM(eta = 0.01, n_iter = 10)
ppn.fit(X, y)
"""
pl.plot(range(1, len(ppn.cost_) + 1), ppn.cost_, marker = 'o')
pl.xlabel('Epoches')
pl.ylabel('Number of misclassifications')
pl.show()
"""
def plot_decision_region(X, y, classifier, resolution=0.02):
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = mcl.ListedColormap(colors[:len(np.unique(y))])
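# Evaluate the classifier on a dense grid spanning the two features and
# draw the predicted class of every grid point as filled contours.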
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
pl.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
pl.xlim(xx1.min(), xx1.max())
pl.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
pl.scatter(x=X[y == cl, 0],y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
plot_decision_region(X, y, classifier=ppn)
pl.xlabel('sepal length [cm]')
pl.ylabel('petal length [cm]')
pl.legend(loc='upper left')
pl.show()
| mit |
Don42/youtube-dl | youtube_dl/extractor/adobetv.py | 96 | 4630 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)
class AdobeTVIE(InfoExtractor):
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
'thumbnail': 're:https?://.*\.jpg$',
'upload_date': '20110914',
'duration': 60,
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player = self._parse_json(
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
video_id)
title = player.get('title') or self._search_regex(
r'data-title="([^"]+)"', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('datepublished', webpage, 'upload date'))
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration') or
self._search_regex(
r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
webpage, 'duration', fatal=False))
view_count = str_to_int(self._search_regex(
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
webpage, 'view count'))
formats = [{
'url': source['src'],
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
'tbr': source.get('bitrate'),
} for source in player['sources']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
_TEST = {
# From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
'url': 'https://video.tv.adobe.com/v/2456/',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player_params = self._parse_json(self._search_regex(
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
video_id)
formats = [{
'url': source['src'],
'width': source.get('width'),
'height': source.get('height'),
'tbr': source.get('bitrate'),
} for source in player_params['sources']]
# For both metadata and downloaded files the duration varies among
# formats. I just pick the max one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
for source in player_params['sources']]))
subtitles = {}
for translation in player_params.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
subtitles[lang_id].append({
'url': translation['vttPath'],
'ext': 'vtt',
})
return {
'id': video_id,
'formats': formats,
'title': player_params['title'],
'description': self._og_search_description(webpage),
'duration': duration,
'subtitles': subtitles,
}
| unlicense |
fos/fos-legacy | scratch/4gletools/outline_render_brain.py | 1 | 2254 | from __future__ import with_statement
from contextlib import nested
import pyglet
from gletools import (
ShaderProgram, FragmentShader, VertexShader, Depthbuffer,
Texture, Projection, UniformArray, Lighting, Color
)
from gletools.gl import *
from util import Mesh, Processor, Kernel, offsets, gl_init
from gaussian import Gaussian
### setup ###
window = pyglet.window.Window()
projection = Projection(0, 0, window.width, window.height, near=18, far=50)
texture = Texture(window.width, window.height, GL_RGBA32F)
bunny = Mesh('meshes/brain')
processor = Processor(texture)
### Shaders and helpers ###
depth = ShaderProgram(
VertexShader.open('shaders/normal.vert'),
FragmentShader.open('shaders/depth.frag'),
)
average = ShaderProgram(
VertexShader.open('shaders/normal.vert'),
FragmentShader.open('shaders/convolution.frag'),
kernel_size = 3*3,
kernel = UniformArray(float, 1, [
1, 1, 1,
1, 1, 1,
1, 1, 1,
]),
output_factor = 1.0/9.0,
input_factor = 1.0,
offsets = offsets(-1, 1, window),
)
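# The 3x3 Laplacian kernel below responds to discontinuities in the
# rendered depth image, which is what produces the outline effect.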
laplace = ShaderProgram(
FragmentShader.open('shaders/convolution.frag'),
kernel_size = 3*3,
kernel = UniformArray(float, 1, [
-1, -1, -1,
-1, 8, -1,
-1, -1, -1,
]),
input_factor = 100.0,
output_factor = 1.0/9.0,
offsets = offsets(-1, 1, window),
)
invert = ShaderProgram(
FragmentShader.open('shaders/invert.frag'),
)
### Application code ###
angle = 0.0
def simulate(delta):
global angle
angle += 10.0 * delta
pyglet.clock.schedule_interval(simulate, 0.01)
@window.event
def on_draw():
window.clear()
with nested(processor.renderto(texture), projection, Lighting, Color, depth):
glClearColor(0.0,0.0,0.0,0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glPushMatrix()
glTranslatef(0, 0, -40)
glRotatef(-65, 1, 0, 0)
glRotatef(angle, 0.0, 0.0, 1.0)
glRotatef(90, 1, 0, 0)
glColor4f(0.5, 0.0, 0.0, 1.0)
bunny.draw()
glPopMatrix()
processor.filter(texture, laplace)
processor.filter(texture, invert)
processor.blit(texture)
if __name__ == '__main__':
gl_init()
pyglet.app.run()
| bsd-3-clause |
pgleeson/TestArea | lib/jython/Lib/test/cjkencodings_test.py | 34 | 65909 | teststring = {
'big5': (
"\xa6\x70\xa6\xf3\xa6\x62\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xa4"
"\xa8\xcf\xa5\xce\xac\x4a\xa6\xb3\xaa\xba\x20\x43\x20\x6c\x69\x62"
"\x72\x61\x72\x79\x3f\x0a\xa1\x40\xa6\x62\xb8\xea\xb0\x54\xac\xec"
"\xa7\xde\xa7\xd6\xb3\x74\xb5\x6f\xae\x69\xaa\xba\xa4\xb5\xa4\xd1"
"\x2c\x20\xb6\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xb3\x6e\xc5\xe9"
"\xaa\xba\xb3\x74\xab\xd7\xac\x4f\xa4\xa3\xae\x65\xa9\xbf\xb5\xf8"
"\xaa\xba\x0a\xbd\xd2\xc3\x44\x2e\x20\xac\xb0\xa5\x5b\xa7\xd6\xb6"
"\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xaa\xba\xb3\x74\xab\xd7\x2c"
"\x20\xa7\xda\xad\xcc\xab\x4b\xb1\x60\xa7\xc6\xb1\xe6\xaf\xe0\xa7"
"\x51\xa5\xce\xa4\x40\xa8\xc7\xa4\x77\xb6\x7d\xb5\x6f\xa6\x6e\xaa"
"\xba\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xa8\xc3\xa6\xb3\xa4"
"\x40\xad\xd3\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79"
"\x70\x69\x6e\x67\x20\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d"
"\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20\xa5\x69\x0a"
"\xa8\xd1\xa8\xcf\xa5\xce\x2e\x20\xa5\xd8\xab\x65\xa6\xb3\xb3\x5c"
"\xb3\x5c\xa6\x68\xa6\x68\xaa\xba\x20\x6c\x69\x62\x72\x61\x72\x79"
"\x20\xac\x4f\xa5\x48\x20\x43\x20\xbc\x67\xa6\xa8\x2c\x20\xa6\xd3"
"\x20\x50\x79\x74\x68\x6f\x6e\x20\xac\x4f\xa4\x40\xad\xd3\x0a\x66"
"\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20"
"\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c"
"\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xac\x47\xa7\xda\xad\xcc\xa7"
"\xc6\xb1\xe6\xaf\xe0\xb1\x4e\xac\x4a\xa6\xb3\xaa\xba\x0a\x43\x20"
"\x6c\x69\x62\x72\x61\x72\x79\x20\xae\xb3\xa8\xec\x20\x50\x79\x74"
"\x68\x6f\x6e\x20\xaa\xba\xc0\xf4\xb9\xd2\xa4\xa4\xb4\xfa\xb8\xd5"
"\xa4\xce\xbe\xe3\xa6\x58\x2e\x20\xa8\xe4\xa4\xa4\xb3\xcc\xa5\x44"
"\xad\x6e\xa4\x5d\xac\x4f\xa7\xda\xad\xcc\xa9\xd2\x0a\xad\x6e\xb0"
"\x51\xbd\xd7\xaa\xba\xb0\xdd\xc3\x44\xb4\x4e\xac\x4f\x3a\x0a\x0a",
"\xe5\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e"
"\x20\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89"
"\xe7\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3"
"\x80\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a"
"\x80\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84"
"\xe4\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f"
"\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84"
"\xe9\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5"
"\xbf\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e"
"\x20\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc"
"\xe5\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5"
"\xba\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8"
"\xe5\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4"
"\xb8\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5"
"\xbd\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8"
"\xa6\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20"
"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20"
"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67"
"\x75\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7"
"\x94\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1"
"\xe8\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62"
"\x72\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf"
"\xab\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e"
"\x20\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20"
"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20"
"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67"
"\x75\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5"
"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c"
"\x89\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6"
"\x8b\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84"
"\xe7\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5"
"\x8f\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad"
"\xe6\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6"
"\x88\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8"
"\xab\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98"
"\xaf\x3a\x0a\x0a"),
'big5hkscs': (
"\x88\x45\x88\x5c\x8a\x73\x8b\xda\x8d\xd8\x0a",
"\xf0\xa0\x84\x8c\xc4\x9a\xe9\xb5\xae\xe7\xbd\x93\xe6\xb4\x86\x0a"),
'cp949': (
"\x8c\x63\xb9\xe6\xb0\xa2\xc7\xcf\x20\xbc\x84\xbd\xc3\xc4\xdd\xb6"
"\xf3\x0a\x0a\xa8\xc0\xa8\xc0\xb3\xb3\x21\x21\x20\xec\xd7\xce\xfa"
"\xea\xc5\xc6\xd0\x92\xe6\x90\x70\xb1\xc5\x20\xa8\xde\xa8\xd3\xc4"
"\x52\xa2\xaf\xa2\xaf\xa2\xaf\x20\xb1\xe0\x8a\x96\x20\xa8\xd1\xb5"
"\xb3\x20\xa8\xc0\x2e\x20\x2e\x0a\xe4\xac\xbf\xb5\xa8\xd1\xb4\xc9"
"\xc8\xc2\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xbc\xad\xbf\xef\xb7"
"\xef\x20\xb5\xaf\xc7\xd0\xeb\xe0\x20\xca\xab\xc4\x52\x20\x21\x20"
"\x21\x20\x21\xa4\xd0\x2e\xa4\xd0\x0a\xc8\xe5\xc8\xe5\xc8\xe5\x20"
"\xa4\xa1\xa4\xa1\xa4\xa1\xa1\xd9\xa4\xd0\x5f\xa4\xd0\x20\xbe\xee"
"\x90\x8a\x20\xc5\xcb\xc4\xe2\x83\x4f\x20\xb5\xae\xc0\xc0\x20\xaf"
"\x68\xce\xfa\xb5\xe9\xeb\xe0\x20\xa8\xc0\xb5\xe5\x83\x4f\x0a\xbc"
"\xb3\x90\x6a\x20\xca\xab\xc4\x52\x20\x2e\x20\x2e\x20\x2e\x20\x2e"
"\x20\xb1\xbc\xbe\xd6\x9a\x66\x20\xa8\xd1\xb1\xc5\x20\xa8\xde\x90"
"\x74\xa8\xc2\x83\x4f\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9"
"\xb1\xee\xa3\x8e\x0a\xbf\xcd\xbe\xac\xc4\x52\x20\x21\x20\x21\x20"
"\xe4\xac\xbf\xb5\xa8\xd1\x20\xca\xab\xb4\xc9\xb1\xc5\x20\xa1\xd9"
"\xdf\xbe\xb0\xfc\x20\xbe\xf8\xb4\xc9\xb1\xc5\xb4\xc9\x20\xe4\xac"
"\xb4\xc9\xb5\xd8\xc4\x52\x20\xb1\xdb\xbe\xd6\x8a\xdb\x0a\xa8\xde"
"\xb7\xc1\xb5\xe0\xce\xfa\x20\x9a\xc3\xc7\xb4\xbd\xa4\xc4\x52\x20"
"\xbe\xee\x90\x8a\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9\x9a"
"\xc4\xa8\xef\xb5\xe9\x9d\xda\x21\x21\x20\xa8\xc0\xa8\xc0\xb3\xb3"
"\xa2\xbd\x20\xa1\xd2\xa1\xd2\x2a\x0a\x0a",
"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2"
"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89"
"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88"
"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1"
"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d"
"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20"
"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a"
"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8"
"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae"
"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85"
"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3"
"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20"
"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90"
"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93"
"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec"
"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e"
"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2"
"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea"
"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4"
"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed"
"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94"
"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8"
"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a"
"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea"
"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb"
"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed"
"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81"
"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb"
"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82"
"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"),
'euc_jisx0213': (
"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1"
"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9"
"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9"
"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64"
"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6"
"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5"
"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7"
"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6"
"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20"
"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb"
"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4"
"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4"
"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4"
"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5"
"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4"
"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1"
"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3"
"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3"
"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4"
"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75"
"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2"
"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5"
"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4"
"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4"
"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8"
"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5"
"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4"
"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9"
"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4"
"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce"
"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec"
"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8"
"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7"
"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0"
"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea"
"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4"
"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e"
"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe"
"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4"
"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4"
"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab"
"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2"
"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca"
"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5"
"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9"
"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50"
"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc"
"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a\xa5\xce\xa4\xf7\x20\xa5\xfe\x20"
"\xa5\xc8\xa5\xad\xaf\xac\xaf\xda\x20\xcf\xe3\x8f\xfe\xd8\x20\x8f"
"\xfe\xd4\x8f\xfe\xe8\x8f\xfc\xd6\x0a",
"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20"
"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6"
"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80"
"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"),
'euc_jp': (
"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1"
"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9"
"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9"
"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64"
"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6"
"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5"
"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7"
"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6"
"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20"
"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb"
"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4"
"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4"
"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4"
"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5"
"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4"
"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1"
"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3"
"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3"
"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4"
"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75"
"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2"
"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5"
"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4"
"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4"
"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8"
"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5"
"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4"
"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9"
"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4"
"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce"
"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec"
"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8"
"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7"
"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0"
"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea"
"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4"
"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e"
"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe"
"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4"
"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4"
"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab"
"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2"
"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca"
"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5"
"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9"
"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50"
"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc"
"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a",
"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
"\x99\xe3\x80\x82\x0a\x0a"),
'euc_kr': (
"\xa1\xdd\x20\xc6\xc4\xc0\xcc\xbd\xe3\x28\x50\x79\x74\x68\x6f\x6e"
"\x29\xc0\xba\x20\xb9\xe8\xbf\xec\xb1\xe2\x20\xbd\xb1\xb0\xed\x2c"
"\x20\xb0\xad\xb7\xc2\xc7\xd1\x20\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1"
"\xb9\xd6\x20\xbe\xf0\xbe\xee\xc0\xd4\xb4\xcf\xb4\xd9\x2e\x20\xc6"
"\xc4\xc0\xcc\xbd\xe3\xc0\xba\x0a\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce"
"\x20\xb0\xed\xbc\xf6\xc1\xd8\x20\xb5\xa5\xc0\xcc\xc5\xcd\x20\xb1"
"\xb8\xc1\xb6\xbf\xcd\x20\xb0\xa3\xb4\xdc\xc7\xcf\xc1\xf6\xb8\xb8"
"\x20\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce\x20\xb0\xb4\xc3\xbc\xc1\xf6"
"\xc7\xe2\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1\xb9\xd6\xc0\xbb\x0a\xc1"
"\xf6\xbf\xf8\xc7\xd5\xb4\xcf\xb4\xd9\x2e\x20\xc6\xc4\xc0\xcc\xbd"
"\xe3\xc0\xc7\x20\xbf\xec\xbe\xc6\x28\xe9\xd0\xe4\xba\x29\xc7\xd1"
"\x20\xb9\xae\xb9\xfd\xb0\xfa\x20\xb5\xbf\xc0\xfb\x20\xc5\xb8\xc0"
"\xcc\xc7\xce\x2c\x20\xb1\xd7\xb8\xae\xb0\xed\x20\xc0\xce\xc5\xcd"
"\xc7\xc1\xb8\xae\xc6\xc3\x0a\xc8\xaf\xb0\xe6\xc0\xba\x20\xc6\xc4"
"\xc0\xcc\xbd\xe3\xc0\xbb\x20\xbd\xba\xc5\xa9\xb8\xb3\xc6\xc3\xb0"
"\xfa\x20\xbf\xa9\xb7\xc1\x20\xba\xd0\xbe\xdf\xbf\xa1\xbc\xad\xbf"
"\xcd\x20\xb4\xeb\xba\xce\xba\xd0\xc0\xc7\x20\xc7\xc3\xb7\xa7\xc6"
"\xfb\xbf\xa1\xbc\xad\xc0\xc7\x20\xba\xfc\xb8\xa5\x0a\xbe\xd6\xc7"
"\xc3\xb8\xae\xc4\xc9\xc0\xcc\xbc\xc7\x20\xb0\xb3\xb9\xdf\xc0\xbb"
"\x20\xc7\xd2\x20\xbc\xf6\x20\xc0\xd6\xb4\xc2\x20\xc0\xcc\xbb\xf3"
"\xc0\xfb\xc0\xce\x20\xbe\xf0\xbe\xee\xb7\xce\x20\xb8\xb8\xb5\xe9"
"\xbe\xee\xc1\xdd\xb4\xcf\xb4\xd9\x2e\x0a\x0a",
"\xe2\x97\x8e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\x28\x50\x79"
"\x74\x68\x6f\x6e\x29\xec\x9d\x80\x20\xeb\xb0\xb0\xec\x9a\xb0\xea"
"\xb8\xb0\x20\xec\x89\xbd\xea\xb3\xa0\x2c\x20\xea\xb0\x95\xeb\xa0"
"\xa5\xed\x95\x9c\x20\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8\xeb\x9e"
"\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96\xb4\xec\x9e\x85\xeb\x8b"
"\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec"
"\x9d\x80\x0a\xed\x9a\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20"
"\xea\xb3\xa0\xec\x88\x98\xec\xa4\x80\x20\xeb\x8d\xb0\xec\x9d\xb4"
"\xed\x84\xb0\x20\xea\xb5\xac\xec\xa1\xb0\xec\x99\x80\x20\xea\xb0"
"\x84\xeb\x8b\xa8\xed\x95\x98\xec\xa7\x80\xeb\xa7\x8c\x20\xed\x9a"
"\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20\xea\xb0\x9d\xec\xb2"
"\xb4\xec\xa7\x80\xed\x96\xa5\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8"
"\xeb\x9e\x98\xeb\xb0\x8d\xec\x9d\x84\x0a\xec\xa7\x80\xec\x9b\x90"
"\xed\x95\xa9\xeb\x8b\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d"
"\xb4\xec\x8d\xac\xec\x9d\x98\x20\xec\x9a\xb0\xec\x95\x84\x28\xe5"
"\x84\xaa\xe9\x9b\x85\x29\xed\x95\x9c\x20\xeb\xac\xb8\xeb\xb2\x95"
"\xea\xb3\xbc\x20\xeb\x8f\x99\xec\xa0\x81\x20\xed\x83\x80\xec\x9d"
"\xb4\xed\x95\x91\x2c\x20\xea\xb7\xb8\xeb\xa6\xac\xea\xb3\xa0\x20"
"\xec\x9d\xb8\xed\x84\xb0\xed\x94\x84\xeb\xa6\xac\xed\x8c\x85\x0a"
"\xed\x99\x98\xea\xb2\xbd\xec\x9d\x80\x20\xed\x8c\x8c\xec\x9d\xb4"
"\xec\x8d\xac\xec\x9d\x84\x20\xec\x8a\xa4\xed\x81\xac\xeb\xa6\xbd"
"\xed\x8c\x85\xea\xb3\xbc\x20\xec\x97\xac\xeb\xa0\xa4\x20\xeb\xb6"
"\x84\xec\x95\xbc\xec\x97\x90\xec\x84\x9c\xec\x99\x80\x20\xeb\x8c"
"\x80\xeb\xb6\x80\xeb\xb6\x84\xec\x9d\x98\x20\xed\x94\x8c\xeb\x9e"
"\xab\xed\x8f\xbc\xec\x97\x90\xec\x84\x9c\xec\x9d\x98\x20\xeb\xb9"
"\xa0\xeb\xa5\xb8\x0a\xec\x95\xa0\xed\x94\x8c\xeb\xa6\xac\xec\xbc"
"\x80\xec\x9d\xb4\xec\x85\x98\x20\xea\xb0\x9c\xeb\xb0\x9c\xec\x9d"
"\x84\x20\xed\x95\xa0\x20\xec\x88\x98\x20\xec\x9e\x88\xeb\x8a\x94"
"\x20\xec\x9d\xb4\xec\x83\x81\xec\xa0\x81\xec\x9d\xb8\x20\xec\x96"
"\xb8\xec\x96\xb4\xeb\xa1\x9c\x20\xeb\xa7\x8c\xeb\x93\xa4\xec\x96"
"\xb4\xec\xa4\x8d\xeb\x8b\x88\xeb\x8b\xa4\x2e\x0a\x0a"),
'gb18030': (
"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e"
"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20"
"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3"
"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd"
"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc"
"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba"
"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3"
"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9"
"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb"
"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c"
"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b"
"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74"
"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72"
"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20"
"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3"
"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72"
"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c"
"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82"
"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69"
"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e"
"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2"
"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4"
"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20"
"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c"
"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7"
"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a"
"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7"
"\x3a\x0a\x83\x35\xc7\x31\x83\x33\x9a\x33\x83\x32\xb1\x31\x83\x33"
"\x95\x31\x20\x82\x37\xd1\x36\x83\x30\x8c\x34\x83\x36\x84\x33\x20"
"\x82\x38\x89\x35\x82\x38\xfb\x36\x83\x33\x95\x35\x20\x83\x33\xd5"
"\x31\x82\x39\x81\x35\x20\x83\x30\xfd\x39\x83\x33\x86\x30\x20\x83"
"\x34\xdc\x33\x83\x35\xf6\x37\x83\x35\x97\x35\x20\x83\x35\xf9\x35"
"\x83\x30\x91\x39\x82\x38\x83\x39\x82\x39\xfc\x33\x83\x30\xf0\x34"
"\x20\x83\x32\xeb\x39\x83\x32\xeb\x35\x82\x39\x83\x39\x2e\x0a\x0a",
"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5"
"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20"
"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7"
"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80"
"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80"
"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4"
"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a"
"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9"
"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf"
"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20"
"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5"
"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba"
"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5"
"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8"
"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd"
"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6"
"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70"
"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94"
"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8"
"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72"
"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab"
"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20"
"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70"
"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8"
"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89"
"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b"
"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7"
"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f"
"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6"
"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88"
"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab"
"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf"
"\x3a\x0a\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec\x9d\x80\x20\xea"
"\xb0\x95\xeb\xa0\xa5\xed\x95\x9c\x20\xea\xb8\xb0\xeb\x8a\xa5\xec"
"\x9d\x84\x20\xec\xa7\x80\xeb\x8b\x8c\x20\xeb\xb2\x94\xec\x9a\xa9"
"\x20\xec\xbb\xb4\xed\x93\xa8\xed\x84\xb0\x20\xed\x94\x84\xeb\xa1"
"\x9c\xea\xb7\xb8\xeb\x9e\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96"
"\xb4\xeb\x8b\xa4\x2e\x0a\x0a"),
'gb2312': (
"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
"\xa1\xa3\x0a\x0a",
"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\x0a"),
'gbk': (
"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e"
"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20"
"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3"
"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd"
"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc"
"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba"
"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3"
"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9"
"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb"
"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c"
"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b"
"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74"
"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72"
"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20"
"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3"
"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72"
"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c"
"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82"
"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69"
"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e"
"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2"
"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4"
"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20"
"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c"
"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7"
"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a"
"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7"
"\x3a\x0a\x0a",
"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5"
"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20"
"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7"
"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80"
"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80"
"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4"
"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a"
"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9"
"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf"
"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20"
"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5"
"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba"
"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5"
"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8"
"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd"
"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6"
"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70"
"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94"
"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8"
"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72"
"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab"
"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20"
"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70"
"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8"
"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89"
"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b"
"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7"
"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f"
"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6"
"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88"
"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab"
"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf"
"\x3a\x0a\x0a"),
'johab': (
"\x99\xb1\xa4\x77\x88\x62\xd0\x61\x20\xcd\x5c\xaf\xa1\xc5\xa9\x9c"
"\x61\x0a\x0a\xdc\xc0\xdc\xc0\x90\x73\x21\x21\x20\xf1\x67\xe2\x9c"
"\xf0\x55\xcc\x81\xa3\x89\x9f\x85\x8a\xa1\x20\xdc\xde\xdc\xd3\xd2"
"\x7a\xd9\xaf\xd9\xaf\xd9\xaf\x20\x8b\x77\x96\xd3\x20\xdc\xd1\x95"
"\x81\x20\xdc\xc0\x2e\x20\x2e\x0a\xed\x3c\xb5\x77\xdc\xd1\x93\x77"
"\xd2\x73\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xac\xe1\xb6\x89\x9e"
"\xa1\x20\x95\x65\xd0\x62\xf0\xe0\x20\xe0\x3b\xd2\x7a\x20\x21\x20"
"\x21\x20\x21\x87\x41\x2e\x87\x41\x0a\xd3\x61\xd3\x61\xd3\x61\x20"
"\x88\x41\x88\x41\x88\x41\xd9\x69\x87\x41\x5f\x87\x41\x20\xb4\xe1"
"\x9f\x9a\x20\xc8\xa1\xc5\xc1\x8b\x7a\x20\x95\x61\xb7\x77\x20\xc3"
"\x97\xe2\x9c\x97\x69\xf0\xe0\x20\xdc\xc0\x97\x61\x8b\x7a\x0a\xac"
"\xe9\x9f\x7a\x20\xe0\x3b\xd2\x7a\x20\x2e\x20\x2e\x20\x2e\x20\x2e"
"\x20\x8a\x89\xb4\x81\xae\xba\x20\xdc\xd1\x8a\xa1\x20\xdc\xde\x9f"
"\x89\xdc\xc2\x8b\x7a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9"
"\x8c\x61\xbb\x9a\x0a\xb5\xc1\xb2\xa1\xd2\x7a\x20\x21\x20\x21\x20"
"\xed\x3c\xb5\x77\xdc\xd1\x20\xe0\x3b\x93\x77\x8a\xa1\x20\xd9\x69"
"\xea\xbe\x89\xc5\x20\xb4\xf4\x93\x77\x8a\xa1\x93\x77\x20\xed\x3c"
"\x93\x77\x96\xc1\xd2\x7a\x20\x8b\x69\xb4\x81\x97\x7a\x0a\xdc\xde"
"\x9d\x61\x97\x41\xe2\x9c\x20\xaf\x81\xce\xa1\xae\xa1\xd2\x7a\x20"
"\xb4\xe1\x9f\x9a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9\xaf"
"\x82\xdc\xef\x97\x69\xb4\x7a\x21\x21\x20\xdc\xc0\xdc\xc0\x90\x73"
"\xd9\xbd\x20\xd9\x62\xd9\x62\x2a\x0a\x0a",
"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2"
"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89"
"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88"
"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1"
"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d"
"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20"
"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a"
"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8"
"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae"
"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85"
"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3"
"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20"
"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90"
"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93"
"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec"
"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e"
"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2"
"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea"
"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4"
"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed"
"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94"
"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8"
"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a"
"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea"
"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb"
"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed"
"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81"
"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb"
"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82"
"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"),
'shift_jis': (
"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81"
"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7"
"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7"
"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64"
"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b"
"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83"
"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76"
"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4"
"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20"
"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9"
"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2"
"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82"
"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82"
"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83"
"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82"
"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89"
"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52"
"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93"
"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82"
"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75"
"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0"
"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3"
"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82"
"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82"
"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c"
"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83"
"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82"
"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95"
"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82"
"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc"
"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea"
"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6"
"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5"
"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe"
"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8"
"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2"
"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e"
"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f"
"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82"
"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82"
"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9"
"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0"
"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8"
"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85"
"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7"
"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50"
"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b"
"\x82\xc5\x82\xb7\x81\x42\x0a\x0a",
"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
"\x99\xe3\x80\x82\x0a\x0a"),
'shift_jisx0213': (
"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81"
"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7"
"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7"
"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64"
"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b"
"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83"
"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76"
"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4"
"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20"
"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9"
"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2"
"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82"
"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82"
"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83"
"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82"
"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89"
"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52"
"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93"
"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82"
"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75"
"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0"
"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3"
"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82"
"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82"
"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c"
"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83"
"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82"
"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95"
"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82"
"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc"
"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea"
"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6"
"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5"
"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe"
"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8"
"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2"
"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e"
"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f"
"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82"
"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82"
"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9"
"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0"
"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8"
"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85"
"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7"
"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50"
"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b"
"\x82\xc5\x82\xb7\x81\x42\x0a\x0a\x83\x6d\x82\xf5\x20\x83\x9e\x20"
"\x83\x67\x83\x4c\x88\x4b\x88\x79\x20\x98\x83\xfc\xd6\x20\xfc\xd2"
"\xfc\xe6\xfb\xd4\x0a",
"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20"
"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6"
"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80"
"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"),
}
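# --- Illustrative sketch, not part of the original test data ---
# Each entry above maps a codec name to a pair of byte strings: the text
# encoded in that legacy codec, and the same text encoded as UTF-8 (assuming
# byte-string pairs, i.e. plain str literals as written here on Python 2).
# A minimal round-trip check over such a mapping could look like this; the
# function name is a hypothetical addition.
def _roundtrip_check(mapping):
    for codec, (encoded, utf8) in mapping.items():
        text = utf8.decode('utf-8')
        # Decoding the legacy bytes must reproduce the reference text...
        assert encoded.decode(codec) == text
        # ...and re-encoding the text must reproduce the legacy bytes.
        assert text.encode(codec) == encoded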
| gpl-2.0 |
YueLinHo/Subversion | tools/dev/wc-format.py | 1 | 1957 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sqlite3
import sys

MIN_SINGLE_DB_FORMAT = 19


def get_format(wc_path):
    entries = os.path.join(wc_path, '.svn', 'entries')
    wc_db = os.path.join(wc_path, '.svn', 'wc.db')

    formatno = 'not under version control'

    if os.path.exists(wc_db):
        conn = sqlite3.connect(wc_db)
        curs = conn.cursor()
        curs.execute('pragma user_version;')
        formatno = curs.fetchone()[0]
    elif os.path.exists(entries):
        formatno = int(open(entries).readline())
    elif os.path.exists(wc_path):
        parent_path = os.path.dirname(os.path.abspath(wc_path))
        if wc_path != parent_path:
            formatno = get_format(parent_path)
            if formatno >= MIN_SINGLE_DB_FORMAT:
                return formatno

    return formatno


def print_format(wc_path):
    # see subversion/libsvn_wc/wc.h for format values and information
    #   1.0.x -> 1.3.x: format 4
    #   1.4.x: format 8
    #   1.5.x: format 9
    #   1.6.x: format 10
    #   1.7.x: format 29
    formatno = get_format(wc_path)
    print('%s: %s' % (wc_path, formatno))


if __name__ == '__main__':
    paths = sys.argv[1:]
    if not paths:
        paths = ['.']

    for wc_path in paths:
        print_format(wc_path)
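# Example invocation (illustrative; the reported number depends on the client
# that created the working copy, e.g. format 29 for a 1.7.x working copy,
# per the table in print_format above):
#   $ wc-format.py ~/project-wc
#   /home/user/project-wc: 29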
| apache-2.0 |
mgit-at/ansible | lib/ansible/modules/cloud/openstack/os_stack.py | 7 | 9217 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <[email protected]>
# (c) 2016, Steve Baker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author:
  - "Mathieu Bultel (@matbu)"
  - "Steve Baker (@steveb)"
description:
  - Add or remove a stack in an OpenStack Heat deployment
options:
  state:
    description:
      - Indicate desired state of the resource
    choices: ['present', 'absent']
    default: present
  name:
    description:
      - Name of the stack that should be created; the name may contain
        letters and digits, but no spaces
    required: true
  tag:
    description:
      - Tag for the stack that should be created; the tag may contain
        letters and digits, but no spaces
    version_added: "2.5"
  template:
    description:
      - Path of the template file to use for the stack creation
  environment:
    description:
      - List of environment files that should be used for the stack creation
  parameters:
    description:
      - Dictionary of parameters for the stack creation
  rollback:
    description:
      - Rollback stack creation
    type: bool
    default: 'yes'
  timeout:
    description:
      - Maximum number of seconds to wait for the stack creation
    default: 3600
  availability_zone:
    description:
      - Ignored. Present for backwards compatibility
requirements:
  - "python >= 2.7"
  - "openstacksdk"
'''
EXAMPLES = '''
---
- name: create stack
  ignore_errors: True
  register: stack_create
  os_stack:
    name: "{{ stack_name }}"
    tag: "{{ tag_name }}"
    state: present
    template: "/path/to/my_stack.yaml"
    environment:
      - /path/to/resource-registry.yaml
      - /path/to/environment.yaml
    parameters:
      bmc_flavor: m1.medium
      bmc_image: CentOS
      key_name: default
      private_net: "{{ private_net_param }}"
      node_count: 2
      name: undercloud
      image: CentOS
      my_flavor: m1.large
      external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
returned: always
stack:
description: stack info
type: complex
returned: always
contains:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
            description: Time when the action was performed.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
from ansible.module_utils._text import to_native
def _create_stack(module, stack, cloud, sdk):
try:
stack = cloud.create_stack(module.params['name'],
tags=module.params['tag'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in creating stack: {0}".format(stack))
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _update_stack(module, stack, cloud, sdk):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in updating stack: %s" %
stack['stack_status_reason'])
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
tag=dict(required=False, default=None),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
sdk, cloud = openstack_cloud_from_module(module)
try:
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud, sdk)
else:
stack = _update_stack(module, stack, cloud, sdk)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
lmazuel/ansible | lib/ansible/modules/commands/raw.py | 56 | 3569 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: raw
short_description: Executes a low-down and dirty SSH command
version_added: historical
options:
free_form:
description:
- the raw module takes a free form command to run. There is no parameter actually named 'free form'; see the examples!
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
- when using privilege escalation (C(become)), a default shell will be assigned if one is not provided
as privilege escalation requires a shell.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to C(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- "If using raw from a playbook, you may need to disable fact gathering
using C(gather_facts: no) if you're using C(raw) to bootstrap python
onto the machine."
- If you want to execute a command securely and predictably, it may be
better to use the M(command) or M(shell) modules instead.
    - the C(environment) keyword does not work with raw normally; it requires a shell,
      which means it only works if C(executable) is set or the module is used
      with privilege escalation (C(become)).
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
- name: Bootstrap a legacy python 2.4 host
raw: yum -y install python-simplejson
- name: Bootstrap a host without python2 installed
raw: dnf install -y python2 python2-dnf libselinux-python
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
raw: cat < /tmp/*txt
args:
executable: /bin/bash
- name: safely use templated variables. Always use quote filter to avoid injection issues.
raw: "{{package_mgr|quote}} {{pkg_flags|quote}} install {{python_simplejson|quote}}"
'''
| gpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_4/django/contrib/localflavor/se/utils.py | 93 | 2389 | import datetime
def id_number_checksum(gd):
"""
Calculates a Swedish ID number checksum, using the
"Luhn"-algoritm
"""
n = s = 0
for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
tmp = ((n % 2) and 1 or 2) * int(c)
if tmp > 9:
tmp = sum([int(i) for i in str(tmp)])
s += tmp
n += 1
if (s % 10) == 0:
return 0
return (((s // 10) + 1) * 10) - s
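# Worked example for id_number_checksum (input invented for illustration,
# traced through the loop above): for
#   gd = {'year': '81', 'month': '12', 'day': '18', 'serial': '987'}
# the digit string is '811218987'; doubling every other digit and summing
# the digits of each product gives s == 44, so the function returns
# ((44 // 10) + 1) * 10 - 44 == 6.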
def validate_id_birthday(gd, fix_coordination_number_day=True):
"""
Validates the birth_day and returns the datetime.date object for
the birth_day.
If the date is an invalid birth day, a ValueError will be raised.
"""
today = datetime.date.today()
day = int(gd['day'])
if fix_coordination_number_day and day > 60:
day -= 60
if gd['century'] is None:
        # The century was not specified and needs to be calculated from today's date
current_year = today.year
year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])
if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
year -= 100
# If the person is older than 100 years
if gd['sign'] == '+':
year -= 100
else:
year = int(gd['century'] + gd['year'])
# Make sure the year is valid
    # There are no Swedish personal identity numbers where year < 1800
if year < 1800:
raise ValueError
    # A ValueError will be raised for invalid dates
birth_day = datetime.date(year, int(gd['month']), day)
# birth_day must not be in the future
if birth_day > today:
raise ValueError
return birth_day
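# Example of the century resolution above (dates invented for
# illustration): with today == 2012-01-01 and
#   gd = {'century': None, 'year': '95', 'month': '06', 'day': '12',
#         'sign': None}
# the first guess is 2012 - 12 + 95 == 2095; since '950612' > '120101' the
# birthday would lie in the future, so 100 is subtracted, yielding
# 1995-06-12.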
def format_personal_id_number(birth_day, gd):
# birth_day.strftime cannot be used, since it does not support dates < 1900
return unicode(str(birth_day.year) + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def format_organisation_number(gd):
if gd['century'] is None:
century = ''
else:
century = gd['century']
return unicode(century + gd['year'] + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def valid_organisation(gd):
return gd['century'] in (None, 16) and \
int(gd['month']) >= 20 and \
gd['sign'] in (None, '-') and \
gd['year'][0] in ('2', '5', '7', '8', '9') # group identifier
| mit |
LLNL/spack | var/spack/repos/builtin/packages/libtiff/package.py | 3 | 1103 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libtiff(AutotoolsPackage):
"""LibTIFF - Tag Image File Format (TIFF) Library and Utilities."""
homepage = "http://www.simplesystems.org/libtiff/"
url = "https://download.osgeo.org/libtiff/tiff-4.0.10.tar.gz"
version('4.0.10', sha256='2c52d11ccaf767457db0c46795d9c7d1a8d8f76f68b0b800a3dfe45786b996e4')
version('4.0.9', sha256='6e7bdeec2c310734e734d19aae3a71ebe37a4d842e0e23dbb1b8921c0026cfcd')
version('4.0.8', sha256='59d7a5a8ccd92059913f246877db95a2918e6c04fb9d43fd74e5c3390dac2910')
version('4.0.7', sha256='9f43a2cfb9589e5cecaa66e16bf87f814c945f22df7ba600d63aac4632c4f019')
version('4.0.6', sha256='4d57a50907b510e3049a4bba0d7888930fdfc16ce49f1bf693e5b6247370d68c')
version('3.9.7', sha256='f5d64dd4ce61c55f5e9f6dc3920fbe5a41e02c2e607da7117a35eb5c320cef6a')
depends_on('jpeg')
depends_on('zlib')
depends_on('xz')
| lgpl-2.1 |
rhdekker/collatex | collatex-pythonport/collatex/linsuffarr.py | 2 | 34420 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Linsuffarr: Suffix arrays for natural language processing
In its simplest use as a command line, this Python module performs the linear
construction of the suffix array for the text given on the standard input
(Kärkkäinen and Sanders, Linear work suffix array construction,
Journal of the ACM, vol. 53, pp. 918-936, 2006)
In addition to the construction of suffix array, this module provides
facilities to attach user-defined features to suffixes:
see SuffixArray.addFeature and SuffixArray.addFeatureSA for more details.
This module provides a simple mechanism to load and save suffix arrays along
with the features already defined.
To get more information:
- about the module API, type
$ pydoc linsuffarr
- about the command line usage, type
$ python linsuffarr.py --help
Ported to Python 3 by Ronald Haentjens Dekker
"""
from array import array as _array
from pickle import HIGHEST_PROTOCOL as _HIGHEST_PROTOCOL
from pickle import dumps as _dumps
from pickle import loads as _loads
from gzip import GzipFile
from inspect import getfullargspec
from optparse import OptionParser
from os.path import getsize
from sys import argv as _argv
from sys import stderr as _stderr
from sys import stdin as _stdin
from sys import stdout as _stdout
from time import time as _time
__version__ = "0.3"
__author__ = "Julien Gosme <[email protected]>"
UNIT_BYTE = 0
UNIT_CHARACTER = 1
UNIT_WORD = 2
DEFAULT_UNIT_STR = "word"
DEFAULT_UNIT = UNIT_WORD
DEFAULT_ENCODING = "utf-8"
COMPRESSION_LEVEL = 1
EXIT_BAD_OPTION = 1
EXIT_ERROR_FILE = 2
_trace=False
def _open(filename, mode="r"):
"""
Universal open file facility.
With normal files, this function behaves as the open builtin.
    With gzip-ed files, it decompresses or compresses according to the specified mode.
In addition, when filename is '-', it opens the standard input or output according to
the specified mode.
    Modes are expected to be 'r' or 'w', optionally with a 'b' suffix for binary data.
"""
if filename.endswith(".gz"):
return GzipFile(filename, mode, COMPRESSION_LEVEL)
elif filename == "-":
if mode=="r":
return _stdin
elif mode=="w":
return _stdout
else:
#TODO: set encoding to UTF-8?
return open(filename, mode=mode)
def _radixPass(a, b, r, n, K):
"""
Stable sort of the sequence a according to the keys given in r.
>>> a=range(5)
>>> b=[0]*5
>>> r=[2,1,3,0,4]
>>> _radixPass(a, b, r, 5, 5)
>>> b
[3, 1, 0, 2, 4]
When n is less than the length of a, the end of b must be left unaltered.
>>> b=[5]*5
>>> _radixPass(a, b, r, 2, 2)
>>> b
[1, 0, 5, 5, 5]
>>> _a=a=[1, 0]
>>> b= [0]*2
>>> r=[0, 1]
>>> _radixPass(a, b, r, 2, 2)
>>> a=_a
>>> b
[0, 1]
>>> a=[1, 1]
>>> _radixPass(a, b, r, 2, 2)
>>> b
[1, 1]
>>> a=[0, 1, 1, 0]
>>> b= [0]*4
>>> r=[0, 1]
>>> _radixPass(a, b, r, 4, 2)
>>> a=_a
>>> b
[0, 0, 1, 1]
"""
c = _array("i", [0]*(K+1)) # counter array
for i in range(n): # count occurrences
c[r[a[i]]]+=1
sum=0
for i in range(K+1): # exclusive prefix sums
t = c[i]
c[i] = sum
sum += t
for a_i in a[:n]: # sort
b[c[r[a_i]]] = a_i
c[r[a_i]]+=1
def _nbOperations(n):
"""
Exact number of atomic operations in _radixPass.
"""
if n<2:
return 0
else:
n0=(n+2)//3
n02=n0+n//3
return 3*(n02)+n0+_nbOperations(n02)
def _traceSuffixArray(operations, totalOperations):
if totalOperations==0:
percentage=100.
else:
percentage=float((operations*100)/totalOperations)
print >> _stderr, "Construction %.2f%% (%i/%i)\r"%(percentage,operations, totalOperations),
_stderr.flush()
def _suffixArrayWithTrace(s, SA, n, K, operations, totalOperations):
"""
This function is a rewrite in Python of the C implementation proposed in Kärkkäinen and Sanders paper.
Find the suffix array SA of s[0..n-1] in {1..K}^n
Require s[n]=s[n+1]=s[n+2]=0, n>=2
"""
if _trace:
_traceSuffixArray(operations, totalOperations)
n0 = (n+2)//3
n1 = (n+1)//3
n2 = n//3
n02 = n0+n2
SA12 = _array("i", [0]*(n02+3))
SA0 = _array("i", [0]*n0)
s0 = _array("i", [0]*n0)
# s12 : positions of mod 1 and mod 2 suffixes
s12 = _array("i", [i for i in range(n+(n0-n1)) if i%3])# <- writing i%3 is more efficient than i%3!=0
s12.extend([0]*3)
# lsb radix sort the mod 1 and mod 2 triples
_radixPass(s12, SA12, s[2:], n02, K)
if _trace:
operations+=n02
_traceSuffixArray(operations, totalOperations)
_radixPass(SA12, s12, s[1:], n02, K)
if _trace:
operations+=n02
_traceSuffixArray(operations, totalOperations)
_radixPass(s12, SA12, s, n02, K)
if _trace:
operations+=n02
_traceSuffixArray(operations, totalOperations)
# find lexicographic names of triples
name = 0
c= _array("i",[-1]*3)
for i in range(n02) :
cSA12=s[SA12[i]:SA12[i]+3]
if cSA12!=c:
name+=1
c=cSA12
if SA12[i] % 3 == 1 :
s12[SA12[i]//3] = name # left half
else :
s12[(SA12[i]//3) + n0] = name # right half
if name < n02 : # recurse if names are not yet unique
operations=_suffixArrayWithTrace(s12, SA12,n02,name+1,operations, totalOperations)
if _trace:
_traceSuffixArray(operations, totalOperations)
# store unique names in s12 using the suffix array
for i,SA12_i in enumerate(SA12[:n02]):
s12[SA12_i] = i + 1
else: #generate the suffix array of s12 directly
if _trace:
operations+=_nbOperations(n02)
_traceSuffixArray(operations, totalOperations)
for i,s12_i in enumerate(s12[:n02]):
SA12[s12_i - 1] = i
# stably sort the mod 0 suffixes from SA12 by their first character
j=0
for SA12_i in SA12[:n02]:
if (SA12_i < n0):
s0[j] = 3*SA12_i
j+=1
_radixPass(s0,SA0,s,n0,K)
if _trace:
operations+=n0
_traceSuffixArray(operations, totalOperations)
# merge sorted SA0 suffixes and sorted SA12 suffixes
p = j = k = 0
t = n0 - n1
while k < n :
if SA12[t] < n0 :# pos of current offset 12 suffix
i = SA12[t] * 3 + 1
else :
i = (SA12[t] - n0 ) * 3 + 2
j = SA0[p]#pos of current offset 0 suffix
if SA12[t] < n0 :
bool = (s[i], s12[SA12[t]+n0]) <= (s[j], s12[int(j/3)])
else :
bool = (s[i], s[i+1], s12[SA12[t]-n0+1]) <= ( s[j], s[j+1], s12[int(j/3)+n0])
if(bool) :
SA[k] = i
t += 1
if t == n02 : # done --- only SA0 suffixes left
k += 1
while p < n0 :
SA[k] = SA0[p]
p += 1
k += 1
else :
SA[k] = j
p += 1
if p == n0 :#done --- only SA12 suffixes left
k += 1
while t < n02 :
if SA12[t] < n0 :# pos of current offset 12 suffix
SA[k] = (SA12[t] * 3) + 1
else :
SA[k] = ((SA12[t] - n0) * 3) + 2
t += 1
k += 1
k += 1
return operations
def _suffixArray(s, SA, n, K):
if(_trace):
totalOperations=_nbOperations(n)
operations=0
else:
totalOperations=0
operations=0
_suffixArrayWithTrace(s, SA, n, K, operations, totalOperations)
if(_trace):
print >> _stderr, ""
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
"""
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2
"""
len1=len(seq1)-start1
len2=len(seq2)-start2
# We set seq2 as the shortest sequence
if len1 < len2:
seq1, seq2 = seq2, seq1
start1, start2 = start2, start1
len1,len2 = len2, len1
# if seq2 is empty returns 0
if len2==0:
return 0
i=0
pos2=start2
for i in range(min(len1, len2)):
#print seq1, seq2, start1, start2
if seq1[start1+i] != seq2[start2+i]:
return i
# we have reached the end of seq2 (need to increment i)
return i+1
def LCP(SA):
"""
Compute the longest common prefix for every adjacent suffixes.
The result is a list of same size as SA.
Given two suffixes at positions i and i+1,
their LCP is stored at position i+1.
A zero is stored at position 0 of the output.
>>> SA=SuffixArray("abba", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 1, 0, 1])
>>> SA=SuffixArray("", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_CHARACTER)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_WORD)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("abab", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 2, 0, 1])
"""
string=SA.string
length=SA.length
lcps=_array("i", [0]*length)
SA=SA.SA
if _trace:
delta=max(length//100,1)
for i, pos in enumerate(SA):
if i%delta==0:
percent=float((i+1)*100)/length
print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r"%(percent, i+1, length),
lcps[i]=_longestCommonPrefix(string, string, SA[i-1], pos)
else:
for i, pos in enumerate(SA):
lcps[i]=_longestCommonPrefix(string, string, SA[i-1], pos)
if _trace:
print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r"%(100.0, length, length)
if lcps: # Correct the case where string[0] == string[-1]
lcps[0] = 0
return lcps
class SuffixArray(object):
"""
Constructs the suffix array of the string using the processing unit specified.
"""
def __init__(self, string, unit=DEFAULT_UNIT, encoding=DEFAULT_ENCODING, noLCPs=False):
if unit==UNIT_WORD:
self.tokSep=" "
elif unit in (UNIT_CHARACTER, UNIT_BYTE):
self.tokSep=""
else:
raise Exception("Unknown unit type identifier:", unit)
start=_time()
self.unit = unit
self.encoding = encoding
        if _trace: print("Tokenization ...\r", end="", file=_stderr)
string = self.tokenize(string)
        if _trace: print("Tokenization done", file=_stderr)
        if _trace: print("Renaming tokens ...\r", end="", file=_stderr)
self.voc = [None]+sorted(set(string))
self.tokId = dict((char, iChar) for iChar,char in enumerate(self.voc))
self.string = [self.tokId[c] for c in string]
        if _trace: print("Renaming tokens done", file=_stderr)
self.vocSize= len(self.voc)
self.length = len(string)
self.SA = _array("i", [0]*(self.length+3))
self.string = _array("i", self.string+[0]*3)
_suffixArray(self.string, self.SA, self.length, self.vocSize)
del self.SA[self.length:]
del self.string[self.length:]
self.nbSentences = self.string.count(self.tokId.get("\n", 0))
self.length = len(string)
self.vocSize= len(self.voc) - 1 # decrement because of the None token
if "\n" in self.tokId:
self.vocSize-=1 # decrement because of the EOL token
self.features=[]
if not noLCPs:
self.addFeatureSA(LCP)
self.constructionTime=_time()-start
        if _trace: print("construction time %.3fs" % self.constructionTime, file=_stderr)
def addFeatureSA(self, callback, default=None, name=None):
"""
Add a feature to the suffix array.
The callback must return a sequence such that
the feature at position i is attached to the suffix referenced by
self.SA[i].
It is called with one argument: the instance of SuffixArray self.
The callback may traverse self.SA in any fashion.
The default behavior is to name the new feature after the callback name.
To give another name, set the argument name accordingly.
When the feature of an unknown substring of the text is requested,
the value of the default argument is used.
If the feature attached to a suffix is independent of the other suffix
features, then the method addFeature gives a better alternative.
You may use addFeatureSA as a decorator as in the following example.
Example: feature named bigram which attach the frequencies of the
leading bigram to each suffix.
>>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
>>> def bigram(SA):
... res=[0]*SA.length
... end=0
... while end <= SA.length:
...
... begin=end-1
... while end < SA.length and SA._LCP_values[end]>=2:
... if SA.SA[end]+2<=SA.length: #end of string
... end+=1
...
... nbBigram=end-begin
        ...     for i in range(begin, end):
... if SA.SA[i]+2<=SA.length:
... res[i]=nbBigram
...
... end+=1
... return res
>>> SA.addFeatureSA(bigram, 0)
>>> SA._bigram_values
[0, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2]
        >>> print(str(SA).expandtabs(14)) #doctest: +SKIP
... 10 'i' LCP=0 , bigram=0
... 7 'ippi' LCP=1 , bigram=1
... 4 'issippi' LCP=1 , bigram=2
... 1 'ississippi' LCP=4 , bigram=2
... 0 'mississipp' LCP=0 , bigram=1
... 9 'pi' LCP=0 , bigram=1
... 8 'ppi' LCP=1 , bigram=1
... 6 'sippi' LCP=0 , bigram=2
... 3 'sissippi' LCP=2 , bigram=2
... 5 'ssippi' LCP=1 , bigram=2
... 2 'ssissippi' LCP=3 , bigram=2
>>> SA.bigram('ip')
1
>>> SA.bigram('si')
2
>>> SA.bigram('zw')
0
"""
if name is None:
featureName = callback.__name__
else:
featureName = name
featureValues=callback(self)
setattr(self, "_%s_values"%featureName, featureValues)
setattr(self, "%s_default"%featureName, default)
self.features.append(featureName)
def findFeature(substring):
res=self._findOne(substring,)
if res is not False:
return featureValues[res]
else:
return default
setattr(self, featureName, findFeature)
def addFeature(self, callback, default=None, name=None, arguments=None):
"""
Add a feature to the suffix array.
The callback must return the feature corresponding to the suffix at
position self.SA[i].
The callback must be callable (a function or lambda).
The argument names of the callback are used to determine the data
needed. If an argument is the name of feature already defined, then
this argument will be the value of that feature for the current suffix.
In addition the argument pos is the position of the current suffix
and iSA is the index of pos in SA.
        Other attributes of the SuffixArray instance may be used as argument
names.
If the feature attached to a suffix depends on other suffix features,
then the method addFeatureSA is the only choice.
"""
if name is None:
featureName=callback.__name__
else:
featureName=name
if arguments is None:
            signature=getfullargspec(callback)[0]
else:
signature=arguments
featureValues=[default]*(self.length)
args=[getattr(self, "_%s_values"%featName) for featName in signature]
#print args
for i, pos in enumerate(self.SA):
arg=[j[i] for j in args]
#print arg
featureValues[i]=callback(*arg)
#end alternative
setattr(self, "_%s_values"%featureName, featureValues)
setattr(self, "%s_default"%featureName, default)
self.features.append(featureName)
def findFeature(substring):
res=self._findOne(substring)
if res:
return featureValues[res]
else:
return default
setattr(self, featureName, findFeature)
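    # A minimal sketch of addFeature (names invented for illustration; the
    # callback consumes the built-in LCP feature, which the loop above
    # resolves through the _LCP_values attribute):
    #
    #   SA.addFeature(lambda LCP: LCP + 1, default=0, name='succLCP')
    #   SA._succLCP_values   # one value per suffix: its LCP plus one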
def tokenize(self, string):
"""
Tokenizer utility.
When processing byte, outputs the string unaltered.
The character unit type is used for unicode data, the string is
decoded according to the encoding provided.
In the case of word unit, EOL characters are detached from the
preceding word, and outputs the list of words, i.e. the list of non-space strings
separated by space strings.
>>> SA=SuffixArray('abecedaire', UNIT_BYTE)
>>> SA.tokenize('abecedaire')=='abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('abecedaire', UNIT_BYTE, "utf-8")
>>> SA.tokenize('abecedaire')==u'abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('mississippi', UNIT_WORD)
>>> SA.tokenize('miss issi ppi')
['miss', 'issi', 'ppi']
>>> SA.tokenize('miss issi\\nppi')
['miss', 'issi', '\\n', 'ppi']
"""
if self.unit == UNIT_WORD:
# the EOL character is treated as a word, hence a substitution
# before split
return [token for token in string.replace("\n", " \n ").split(self.tokSep) if token!=""]
elif self.unit == UNIT_CHARACTER:
            # str input is already decoded in Python 3; only bytes need it
            if isinstance(string, bytes):
                return string.decode(self.encoding)
            return string
else:
return string
def reprString(self, string, length):
"""
Output a string of length tokens in the original form.
If string is an integer, it is considered as an offset in the text.
Otherwise string is considered as a sequence of ids (see voc and
tokId).
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString(0, 3)
'mis'
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString([1, 4, 1, 3, 3, 2], 5)
'isipp'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString(0, 3)
'missi ssi ppi'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString([1, 3, 2], 3)
'missi ssi ppi'
"""
if isinstance(string, int):
length=min(length, self.length-string)
string=self.string[string:string+length]
voc=self.voc
res= self.tokSep.join((voc[id] for id in string[:length]))
if self.unit==UNIT_WORD:
res=res.replace(" \n", "\n")
res=res.replace("\n ", "\n")
if self.unit==UNIT_CHARACTER:
res=res.encode(self.encoding)
return res
def __str__(self, start=0, end=-1, maxSuffixLength=10):
"""
Human readable string representation of the suffix array.
"""
string=self.string
SA=self.SA
voc=self.voc
tokSep=self.tokSep
features=self.features
res=[]
if end==-1:
end=self.length
for i, pos in enumerate(SA[start:end]):
suffix=self.reprString(pos, maxSuffixLength)[:maxSuffixLength]
suffix=repr(suffix)
suffix=suffix.ljust(maxSuffixLength+2)
pos=str(pos).rjust(6)
feat=",\t".join(["%s=%s "%(f,repr(getattr(self, "_%s_values"%f)[i])) for f in features])
res.append("%s\t%s\t%s"%(pos, suffix, feat))
return '\n'.join(res)
def toFile(self, filename):
"""
Save the suffix array instance including all features attached in
filename. Accept any filename following the _open conventions,
for example if it ends with .gz the file created will be a compressed
GZip file.
"""
start=_time()
        fd=_open(filename, "wb")  # binary mode: the payload is a pickle
savedData=[self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]
for featureName in self.features:
featureValues = getattr(self, "_%s_values"%featureName)
featureDefault = getattr(self, "%s_default"%featureName)
savedData.append((featureValues,featureDefault))
fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
fd.flush()
try:
self.sizeOfSavedFile=getsize(fd.name)
except OSError:#if stdout is used
self.sizeOfSavedFile="-1"
self.toFileTime=_time()-start
        if _trace: print("toFileTime %.2fs" % self.toFileTime, file=_stderr)
        if _trace: print("sizeOfSavedFile %sb" % self.sizeOfSavedFile, file=_stderr)
fd.close()
@classmethod
def fromFile(cls, filename):
"""
Load a suffix array instance from filename, a file created by
toFile.
Accept any filename following the _open conventions.
"""
self = cls.__new__(cls) #new instance which does not call __init__
start=_time()
        savedData=_loads(_open(filename, "rb").read())
# load common attributes
self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6]
self.length=len(self.SA)
# determine token delimiter
if self.unit==UNIT_WORD:
self.tokSep=" "
elif self.unit in (UNIT_CHARACTER, UNIT_BYTE):
self.tokSep=""
else:
raise Exception("Unknown unit type identifier:", self.unit)
# recompute tokId based on voc
self.tokId=dict((char, iChar) for iChar,char in enumerate(self.voc))
self.nbSentences = self.string.count(self.tokId.get("\n", 0))
# Load features
self.features=[]
for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]):
self.addFeatureSA((lambda _: featureValues), name=featureName, default=featureDefault)
self.fromFileTime=_time()-start
        if _trace: print("fromFileTime %.2fs" % self.fromFileTime, file=_stderr)
return self
def _findOne(self, subString):
"""
>>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
>>> SA._findOne("ippi")
1
>>> SA._findOne("missi")
4
"""
SA=self.SA
LCPs=self._LCP_values
string=self.string
try:
subString=_array("i", [self.tokId[c] for c in self.tokenize(subString)])
except KeyError:
# if a token of the subString is not in the vocabulary
# the substring can't be in the string
return False
lenSubString=len(subString)
#################################
# Dichotomy search of subString #
#################################
lower=0
upper=self.length
success=False
while upper-lower >0:
middle=(lower+upper)//2
middleSubString=string[SA[middle]:min(SA[middle]+lenSubString,self.length)]
#NOTE: the cmp function is removed in Python 3
#Strictly speaking we are doing one comparison more now
if subString < middleSubString:
upper=middle
elif subString > middleSubString:
lower=middle+1
else:
success=True
break
if not success:
return False
else:
return middle
def find(self, subString, features=[]):
"""
Dichotomy search of subString in the suffix array.
As soon as a suffix which starts with subString is found,
it uses the LCPs in order to find the other matching suffixes.
The outputs consists in a list of tuple (pos, feature0, feature1, ...)
where feature0, feature1, ... are the features attached to the suffix
at position pos.
Features are listed in the same order as requested in the input list of
features [featureName0, featureName1, ...]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ssi")
array('i', [5, 2])
>>> SA.find("mi")
array('i', [0])
>>> SA=SuffixArray('miss A and miss B', UNIT_WORD)
>>> SA.find("miss")
array('i', [0, 3])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("iss", ['LCP'])
[(4, 1), (1, 4)]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("A")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("pp")
array('i', [8])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ppp")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("im")
array('i')
"""
SA=self.SA
LCPs=self._LCP_values
string=self.string
middle=self._findOne(subString)
if middle is False:
return _array('i')
subString=_array("i", [self.tokId[c] for c in self.tokenize(subString)])
lenSubString=len(subString)
###########################################
# Use LCPS to retrieve the other suffixes #
###########################################
lower=middle
upper=middle+1
middleLCP=LCPs[middle]
while lower>0 and LCPs[lower]>=lenSubString:
lower-=1
while upper<self.length and LCPs[upper]>=lenSubString:
upper+=1
###############################################
# When features is empty, outputs a flat list #
###############################################
res=SA[lower:upper]
if len(features)==0:
return res
##############################################
# When features is non empty, outputs a list #
# of tuples (pos, feature_1, feature_2, ...) #
##############################################
else:
features=[getattr(self, "_%s_values"%featureName) for featureName in features]
features=[featureValues[lower:upper] for featureValues in features]
            return list(zip(res, *features))
def parseArgv():
"""
Command line option parser.
"""
parser = OptionParser()
parser.usage=r""" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]
Create the suffix array of TEXT with the processing UNIT and optionally store it in SA_FILE for subsequent use.
UNIT may be set to 'byte', 'character' (given an encoding with the --encoding option) or 'word', which is the default.
"""
parser.add_option("-i", "--input",
action="store", type="string", dest="input",
default=False,
help="Path of the file containing the input text. When '-' is given, read the standard input (default). If the path ends with '.gz', reads the decompressed file.")
parser.add_option("-o", "--output",
action="store", type="string", dest="output",
default=False,
help="Store the suffix array of the input to the file OUTPUT. When '-' is given, writes to the standard output. If the filename ends with '.gz', the suffix array will be stored compressed.")
parser.add_option("", "--load",
action="store", type="string", dest="SAFile",
default=False,
help="Load a suffix array from SAFILE, this option and --input are mutually exclusive.")
parser.add_option("-u", "--unit",
action="store", type="string", dest="unit",
default=DEFAULT_UNIT_STR,
help="Processing unit used for the creation of the suffix array."+\
'Possible values are "byte", "character" and "word". Default is "%s".'%DEFAULT_UNIT_STR+\
"This option is ignored when the suffix array is loaded from SAFILE."+\
'For characters, the input is decoded according to the encoding set via the option --encoding.')
parser.add_option("-e", "--encoding",
action="store", type="string", dest="encoding",
default=DEFAULT_ENCODING,
help="Encoding of the input. This information is required only when processing characters. Default is '%s'."%DEFAULT_ENCODING)
parser.add_option("-p", "--print",
action="store_true", dest="printSA",
default=False,
help="Prints the suffix array in a human readable format to the standard error output.")
parser.add_option("", "--verbose",
action="store_true", dest="verbose",
default=False,
help="Prints more information.")
parser.add_option("", "--no-lcps",
action="store_true", dest="noLCPs",
default=False,
help="Switch off the computation of LCPs. By doing so, the find functions are unusable.")
(options, args) = parser.parse_args(_argv)
strings=args[1:]
return (options, strings)
def main():
"""
Entry point for the standalone script.
"""
(options, strings)=parseArgv()
global _suffixArray, _trace
#############
# Verbosity #
#############
_trace=options.verbose
###################
# Processing unit #
###################
if options.unit == "byte":
options.unit = UNIT_BYTE
elif options.unit == "character":
options.unit = UNIT_CHARACTER
elif options.unit == "word":
options.unit = UNIT_WORD
else:
print >> _stderr, "Please specify a valid unit type."
exit(EXIT_BAD_OPTION)
######################
# Build suffix array #
######################
if not options.SAFile: # Build the suffix array from INPUT
if not options.input:#default is standard input
options.input="-"
try:
string=_open(options.input, "r").read()
except IOError:
print >> _stderr, "File %s does not exist."%options.input
exit(EXIT_ERROR_FILE)
SA=SuffixArray(string, options.unit, options.encoding, options.noLCPs)
########################
# Or load suffix array #
########################
elif not options.input and options.SAFile: #Load suffix array from SA_FILE
try:
SA=SuffixArray.fromFile(options.SAFile)
except IOError:
print >> _stderr, "SA_FILE %s does not exist."%options.SAFile
exit(EXIT_ERROR_FILE)
else:
print >> _stderr, "Please set only one option amongst --input and --load.\n"+\
"Type %s --help for more details."%_argv[0]
exit(EXIT_BAD_OPTION)
######################
# Print suffix array #
######################
if options.printSA:
#Buffered ouptut
deltaLength=1000
start=0
while start<SA.length:
            print(SA.__str__(start, start+deltaLength), file=_stderr)
start+=deltaLength
####################################
# Look for every string in strings #
####################################
for string in strings:
print >> _stderr, ""
print >> _stderr, "Positions of %s:"%string
print >> _stderr, " %s"%list(SA.find(string))
#########################
# Save SAFILE if needed #
#########################
if options.output:
SA.toFile(options.output)
if _trace: print >> _stderr, "Done\r\n"
if __name__ == "__main__":
if len(_argv)==2 and _argv[1]=="--test":
from doctest import testmod
testmod()
else:
main()
| gpl-3.0 |
OS2World/APP-INTERNET-torpak_2 | Lib/test/test_cl.py | 12 | 3930 | #! /usr/bin/env python
"""Whimpy test script for the cl module
Roger E. Masse
"""
import cl
from test.test_support import verbose
clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID',
'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO',
'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE',
'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS',
'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO',
'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName',
'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE',
'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG',
'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE',
'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE',
'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE',
'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE',
'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE',
'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE',
'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE',
'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS',
'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER',
'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER',
'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT',
'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET',
'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP',
'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel',
'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC',
'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO',
'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK',
'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage',
'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE',
'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE',
'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE',
'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW',
'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC',
'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE',
'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT',
'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG',
'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR',
'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS',
'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD',
'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS',
'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO',
'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF',
'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD',
'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE',
'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS',
'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor',
'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL',
'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR',
'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize',
'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332',
'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1',
'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY',
'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED',
'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD',
'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO',
'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName',
'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422',
'YUV422DC', 'YUV422HC', '__doc__', '__name__', 'cvt_type', 'error']
# This is a very unobtrusive test for the existence of the cl
# module and all its attributes.
def main():
    # touch all the attributes of cl without doing anything
if verbose:
print 'Touching cl module attributes...'
for attr in clattrs:
if verbose:
print 'touching: ', attr
getattr(cl, attr)
main()
| mit |
sicily/qt4.8.4 | src/3rdparty/webkit/Source/ThirdParty/gyp/test/defines/gyptest-defines-env-regyp.py | 151 | 1312 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
try:
os.environ['GYP_DEFINES'] = 'value=50'
test.run_gyp('defines.gyp')
finally:
# We clear the environ after calling gyp. When the auto-regeneration happens,
# the same define should be reused anyway. Reset to empty string first in
# case the platform doesn't support unsetenv.
os.environ['GYP_DEFINES'] = ''
del os.environ['GYP_DEFINES']
test.build('defines.gyp')
expect = """\
FOO is defined
VALUE is 1
"""
test.run_built_executable('defines', stdout=expect)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
| lgpl-2.1 |
malzantot/protobuf-socket-rpc | python/src/protobuf/socketrpc/controller.py | 9 | 2490 | #!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
# Copyright (c) 2010 Jan Dittberner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
controller.py - Socket implementation of Google's Protocol Buffers RPC
service interface.
This package contains classes providing a socket implementation of the
RPCController abstract class.
Authors: Martin Norbury ([email protected])
Eric Saunders ([email protected])
Jan Dittberner ([email protected])
May 2009, Nov 2010
'''
# Third-party imports
import google.protobuf.service as service
class SocketRpcController(service.RpcController):
''' RpcController implementation to be used by the SocketRpcChannel class.
The RpcController is used to mediate a single method call.
'''
def __init__(self):
'''Constructor which initializes the controller's state.'''
self._fail = False
self._error = None
self.reason = None
def handleError(self, error_code, message):
'''Log and set the controller state.'''
self._fail = True
self.reason = error_code
self._error = message
def reset(self):
'''Resets the controller i.e. clears the error state.'''
self._fail = False
self._error = None
self.reason = None
def failed(self):
'''Returns True if the controller is in a failed state.'''
return self._fail
def error(self):
return self._error
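# A hypothetical usage sketch (channel, method and messages are assumed,
# not defined in this file):
#
#   controller = SocketRpcController()
#   channel.CallMethod(method, controller, request, response_class, callback)
#   if controller.failed():
#       print(controller.error())   # message set via handleError()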
| mit |
shepdelacreme/ansible | lib/ansible/modules/storage/netapp/netapp_e_alerts.py | 25 | 10359 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_alerts
short_description: NetApp E-Series manage email notification settings
description:
- Certain E-Series systems have the capability to send email notifications on potentially critical events.
- This module will allow the owner of the system to specify email recipients for these messages.
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Enable/disable the sending of email-based alerts.
default: enabled
required: false
choices:
- enabled
- disabled
server:
description:
- A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
- To use a fully qualified domain name, you must configure a DNS server on both controllers using
M(netapp_e_mgmt_interface).
- Required when I(state=enabled).
required: no
sender:
description:
- This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
- Required when I(state=enabled).
required: no
contact:
description:
- Allows the owner to specify some free-form contact information to be included in the emails.
- This is typically utilized to provide a contact phone number.
required: no
recipients:
description:
- The email addresses that will receive the email notifications.
- Required when I(state=enabled).
required: no
test:
description:
- When a change is detected in the configuration, a test email will be sent.
- This may take a few minutes to process.
- Only applicable if I(state=enabled).
default: no
type: bool
log_path:
description:
- Path to a file on the Ansible control node to be used for debug logging
required: no
notes:
- Check mode is supported.
    - Alertable messages are a subset of messages shown by the Major Event Log (MEL) of the storage system. Examples
of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
events.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
"""
EXAMPLES = """
- name: Enable email-based alerting
netapp_e_alerts:
state: enabled
sender: [email protected]
server: [email protected]
contact: "Phone: 1-555-555-5555"
recipients:
- [email protected]
- [email protected]
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable alerting
netapp_e_alerts:
state: disabled
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: string
sample: The settings have been updated.
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Alerts(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled',
choices=['enabled', 'disabled']),
server=dict(type='str', required=False, ),
sender=dict(type='str', required=False, ),
contact=dict(type='str', required=False, ),
recipients=dict(type='list', required=False, ),
test=dict(type='bool', required=False, default=False, ),
log_path=dict(type='str', required=False),
))
required_if = [
['state', 'enabled', ['server', 'sender', 'recipients']]
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
args = self.module.params
self.alerts = args['state'] == 'enabled'
self.server = args['server']
self.sender = args['sender']
self.contact = args['contact']
self.recipients = args['recipients']
self.test = args['test']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
# Very basic validation on email addresses: [email protected]
email = re.compile(r"[^@]+@[^@]+\.[^@]+")
if self.sender and not email.match(self.sender):
self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
if self.recipients is not None:
for recipient in self.recipients:
if not email.match(recipient):
self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
if len(self.recipients) < 1:
self.module.fail_json(msg="At least one recipient address must be specified.")
def get_configuration(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS,
**self.creds)
self._logger.info("Current config: %s", pformat(result))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.alerts:
body = dict(alertingEnabled=True)
if not config['alertingEnabled']:
update = True
body.update(emailServerAddress=self.server)
if config['emailServerAddress'] != self.server:
update = True
body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
if self.contact and (self.contact != config['additionalContactInformation']
or not config['sendAdditionalContactInformation']):
update = True
body.update(emailSenderAddress=self.sender)
if config['emailSenderAddress'] != self.sender:
update = True
self.recipients.sort()
if config['recipientEmailAddresses']:
config['recipientEmailAddresses'].sort()
body.update(recipientEmailAddresses=self.recipients)
if config['recipientEmailAddresses'] != self.recipients:
update = True
elif config['alertingEnabled']:
body = dict(alertingEnabled=False)
update = True
self._logger.debug(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def send_test_email(self):
"""Send a test email to verify that the provided configuration is valid and functional."""
if not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid,
timeout=300, method='POST', headers=HEADERS, **self.creds)
if result['response'] != 'emailSentOK':
self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]."
% (result['response'], self.ssid))
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
update = self.update_configuration()
if self.test and update:
self._logger.info("An update was detected and test=True, running a test.")
self.send_test_email()
if self.alerts:
msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
else:
msg = 'Alerting has been disabled.'
self.module.exit_json(msg=msg, changed=update, )
def __call__(self, *args, **kwargs):
self.update()
def main():
alerts = Alerts()
alerts()
if __name__ == '__main__':
main()
| gpl-3.0 |
gxx/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/utils/feedgenerator.py | 51 | 2526 | import datetime
from django.utils import feedgenerator, tzinfo, unittest
class FeedgeneratorTest(unittest.TestCase):
"""
Tests for the low-level syndication feed framework.
"""
def test_get_tag_uri(self):
"""
Test get_tag_uri() correctly generates TagURIs.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
u'tag:example.org,2004-10-25:/foo/bar/headline')
def test_get_tag_uri_with_port(self):
"""
Test that get_tag_uri() correctly generates TagURIs from URLs with port
numbers.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
u'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')
def test_rfc2822_date(self):
"""
Test rfc2822_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"Fri, 14 Nov 2008 13:37:00 -0000"
)
def test_rfc2822_date_with_timezone(self):
"""
Test rfc2822_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=60)))),
"Fri, 14 Nov 2008 13:37:00 +0100"
)
def test_rfc3339_date(self):
"""
Test rfc3339_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"2008-11-14T13:37:00Z"
)
def test_rfc3339_date_with_timezone(self):
"""
Test rfc3339_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=120)))),
"2008-11-14T13:37:00+02:00"
)
def test_atom1_mime_type(self):
"""
Test to make sure Atom MIME type has UTF8 Charset parameter set
"""
atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
self.assertEqual(
atom_feed.mime_type, "application/atom+xml; charset=utf8"
)
| gpl-3.0 |
sogis/Quantum-GIS | python/ext-libs/owslib/fes.py | 29 | 16239 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
API for OGC Filter Encoding (FE) constructs and metadata.
Filter Encoding: http://www.opengeospatial.org/standards/filter
Currently supports version 1.1.0 (04-095).
"""
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["dif","fes","gml","ogc","xs","xsi"])
ns[None] = n.get_namespace("ogc")
return ns
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/filter/1.1.0/filter.xsd'
schema_location = '%s %s' % (namespaces['ogc'], schema)
class FilterRequest(object):
""" filter class """
def __init__(self, parent=None, version='1.1.0'):
"""
filter Constructor
Parameters
----------
- parent: parent etree.Element object (default is None)
- version: version (default is '1.1.0')
"""
self.version = version
self._root = etree.Element(util.nspath_eval('ogc:Filter', namespaces))
if parent is not None:
self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
def set(self, parent=False, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None, identifier=None):
"""
Construct and process a GetRecords request
Parameters
----------
        - parent: the parent Element object. If this is not supplied, a standalone request is generated
- qtype: type of resource to query (i.e. service, dataset)
- keywords: list of keywords
- propertyname: the PropertyName to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- identifier: the dc:identifier to query against with a PropertyIsEqualTo. Ignores all other inputs.
"""
# Set the identifier if passed. Ignore other parameters
dc_identifier_equals_filter = None
if identifier is not None:
dc_identifier_equals_filter = PropertyIsEqualTo('dc:identifier', identifier)
self._root.append(dc_identifier_equals_filter.toXML())
return self._root
# Set the query type if passed
dc_type_equals_filter = None
if qtype is not None:
dc_type_equals_filter = PropertyIsEqualTo('dc:type', qtype)
# Set a bbox query if passed
bbox_filter = None
if bbox is not None:
bbox_filter = BBox(bbox)
# Set a keyword query if passed
keyword_filter = None
if len(keywords) > 0:
if len(keywords) > 1: # loop multiple keywords into an Or
ks = []
for i in keywords:
ks.append(PropertyIsLike(propertyname, "*%s*" % i, wildCard="*"))
keyword_filter = Or(operations=ks)
elif len(keywords) == 1: # one keyword
keyword_filter = PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*")
# And together filters if more than one exists
filters = filter(None,[keyword_filter, bbox_filter, dc_type_equals_filter])
if len(filters) == 1:
self._root.append(filters[0].toXML())
elif len(filters) > 1:
self._root.append(And(operations=filters).toXML())
return self._root
def setConstraint(self, constraint):
"""
        Set a single filter constraint on a GetRecords request
Parameters
----------
- constraint: An OgcExpression object
"""
self._root.append(constraint.toXML())
return self._root
def setConstraintList(self, constraints):
"""
        Set a list of filter constraints on a GetRecords request
Parameters
----------
- constraints: A list of OgcExpression objects
        The list is interpreted like so:
[a,b,c]
a || b || c
[[a,b,c]]
a && b && c
[[a,b],[c],[d],[e]] or [[a,b],c,d,e]
(a && b) || c || d || e
"""
ors = []
if len(constraints) == 1:
if isinstance(constraints[0], OgcExpression):
return self.setConstraint(constraints[0])
else:
self._root.append(And(operations=constraints[0]).toXML())
return self._root
for c in constraints:
if isinstance(c, OgcExpression):
ors.append(c)
elif isinstance(c, list) or isinstance(c, tuple):
if len(c) == 1:
ors.append(c[0])
elif len(c) >= 2:
ands = []
for sub in c:
if isinstance(sub, OgcExpression):
ands.append(sub)
ors.append(And(operations=ands))
self._root.append(Or(operations=ors).toXML())
return self._root
class FilterCapabilities(object):
""" Abstraction for Filter_Capabilities """
def __init__(self, elem):
# Spatial_Capabilities
self.spatial_operands = [f.text for f in elem.findall(util.nspath_eval('ogc:Spatial_Capabilities/ogc:GeometryOperands/ogc:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval('ogc:Spatial_Capabilities/ogc:SpatialOperators/ogc:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
# Temporal_Capabilities
self.temporal_operands = [f.text for f in elem.findall(util.nspath_eval('ogc:Temporal_Capabilities/ogc:TemporalOperands/ogc:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval('ogc:Temporal_Capabilities/ogc:TemporalOperators/ogc:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
# Scalar_Capabilities
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval('ogc:Scalar_Capabilities/ogc:ComparisonOperators/ogc:ComparisonOperator', namespaces))]
class FilterCapabilities200(object):
"""Abstraction for Filter_Capabilities 2.0"""
def __init__(self, elem):
# Spatial_Capabilities
self.spatial_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval('fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval('fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
# Temporal_Capabilities
self.temporal_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval('fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval('fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
# Scalar_Capabilities
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval('fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator', namespaces))]
# Conformance
        self.conformance = {}
for f in elem.findall(util.nspath_eval('fes:Conformance/fes:Constraint', namespaces)):
self.conformance[f.attrib.get('name')] = f.find(util.nspath_eval('fes:DefaultValue', namespaces)).text
def setsortby(parent, propertyname, order='ASC'):
"""
constructs a SortBy element
Parameters
----------
- parent: parent etree.Element object
- propertyname: the PropertyName
- order: the SortOrder (default is 'ASC')
"""
tmp = etree.SubElement(parent, util.nspath_eval('ogc:SortBy', namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('ogc:SortProperty', namespaces))
etree.SubElement(tmp2, util.nspath_eval('ogc:PropertyName', namespaces)).text = propertyname
etree.SubElement(tmp2, util.nspath_eval('ogc:SortOrder', namespaces)).text = order
class SortProperty(object):
def __init__(self, propertyname, order='ASC'):
self.propertyname = propertyname
self.order = order.upper()
if self.order not in ['DESC','ASC']:
raise ValueError("SortOrder can only be 'ASC' or 'DESC'")
def toXML(self):
node0 = etree.Element(util.nspath_eval("ogc:SortProperty", namespaces))
etree.SubElement(node0, util.nspath_eval('ogc:PropertyName', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('ogc:SortOrder', namespaces)).text = self.order
return node0
class SortBy(object):
def __init__(self, properties):
self.properties = properties
def toXML(self):
node0 = etree.Element(util.nspath_eval("ogc:SortBy", namespaces))
for prop in self.properties:
node0.append(prop.toXML())
return node0
class OgcExpression(object):
def __init__(self):
pass
class BinaryComparisonOpType(OgcExpression):
""" Super class of all the property operation classes"""
def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
self.propertyoperator = propertyoperator
self.propertyname = propertyname
self.literal = literal
self.matchcase = matchcase
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
if not self.matchcase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('ogc:PropertyName', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('ogc:Literal', namespaces)).text = self.literal
return node0
class PropertyIsEqualTo(BinaryComparisonOpType):
""" PropertyIsEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
""" PropertyIsNotEqualTo class """
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
"""PropertyIsLessThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
"""PropertyIsGreaterThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsLessThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsGreaterThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'ogc:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
"""PropertyIsLike class"""
def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
self.propertyname = propertyname
self.literal = literal
self.escapeChar = escapeChar
self.singleChar = singleChar
self.wildCard = wildCard
self.matchCase = matchCase
def toXML(self):
node0 = etree.Element(util.nspath_eval('ogc:PropertyIsLike', namespaces))
node0.set('wildCard', self.wildCard)
node0.set('singleChar', self.singleChar)
node0.set('escapeChar', self.escapeChar)
if not self.matchCase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('ogc:PropertyName', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('ogc:Literal', namespaces)).text = self.literal
return node0
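# Usage sketch (illustrative values, not part of the original module): with the
# defaults above, PropertyIsLike('csw:AnyText', '%water%').toXML() serialises
# roughly as:
#   <ogc:PropertyIsLike wildCard="%" singleChar="_" escapeChar="\">
#     <ogc:PropertyName>csw:AnyText</ogc:PropertyName>
#     <ogc:Literal>%water%</ogc:Literal>
#   </ogc:PropertyIsLike>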
class PropertyIsNull(OgcExpression):
"""PropertyIsNull class"""
def __init__(self, propertyname):
self.propertyname = propertyname
def toXML(self):
node0 = etree.Element(util.nspath_eval('ogc:PropertyIsNull', namespaces))
etree.SubElement(node0, util.nspath_eval('ogc:PropertyName', namespaces)).text = self.propertyname
return node0
class PropertyIsBetween(OgcExpression):
"""PropertyIsBetween class"""
def __init__(self, propertyname, lower, upper):
self.propertyname = propertyname
self.lower = lower
self.upper = upper
def toXML(self):
node0 = etree.Element(util.nspath_eval('ogc:PropertyIsBetween', namespaces))
etree.SubElement(node0, util.nspath_eval('ogc:PropertyName', namespaces)).text = self.propertyname
node1 = etree.SubElement(node0, util.nspath_eval('ogc:LowerBoundary', namespaces))
etree.SubElement(node1, util.nspath_eval('ogc:Literal', namespaces)).text = '%s' % self.lower
node2 = etree.SubElement(node0, util.nspath_eval('ogc:UpperBoundary', namespaces))
etree.SubElement(node2, util.nspath_eval('ogc:Literal', namespaces)).text = '%s' % self.upper
return node0
class BBox(OgcExpression):
"""Construct a BBox, two pairs of coordinates (west-south and east-north)"""
def __init__(self, bbox):
self.bbox = bbox
def toXML(self):
tmp = etree.Element(util.nspath_eval('ogc:BBOX', namespaces))
etree.SubElement(tmp, util.nspath_eval('ogc:PropertyName', namespaces)).text = 'ows:BoundingBox'
tmp2 = etree.SubElement(tmp, util.nspath_eval('gml:Envelope', namespaces))
etree.SubElement(tmp2, util.nspath_eval('gml:lowerCorner', namespaces)).text = '%s %s' % (self.bbox[0], self.bbox[1])
etree.SubElement(tmp2, util.nspath_eval('gml:upperCorner', namespaces)).text = '%s %s' % (self.bbox[2], self.bbox[3])
return tmp
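# Usage sketch (illustrative values): BBox expects [minx, miny, maxx, maxy], so
# BBox([-10, 40, 10, 60]).toXML() writes gml:lowerCorner "-10 40" and
# gml:upperCorner "10 60" inside an ogc:BBOX element.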
# BINARY
class BinaryLogicOpType(OgcExpression):
""" Binary Operators: And / Or """
def __init__(self, binary_operator, operations):
self.binary_operator = binary_operator
        # validate explicitly rather than via "assert", which is stripped under
        # "python -O" and would silently skip this check
        if operations is None or len(operations) < 2:
            raise ValueError("Binary operations (And / Or) require a minimum of two operations to operate against")
        self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class And(BinaryLogicOpType):
def __init__(self, operations):
super(And,self).__init__('ogc:And', operations)
class Or(BinaryLogicOpType):
def __init__(self, operations):
super(Or,self).__init__('ogc:Or', operations)
# UNARY
class UnaryLogicOpType(OgcExpression):
""" Unary Operator: Not """
    def __init__(self, unary_operator, operations):
        self.unary_operator = unary_operator
        self.operations = operations
    def toXML(self):
        node0 = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class Not(UnaryLogicOpType):
def __init__(self, operations):
super(Not,self).__init__('ogc:Not', operations)
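# Minimal self-contained demo (an added sketch, not part of the original owslib
# module; the property names and bbox below are placeholder values). It shows
# the setConstraintList grouping described in that method's docstring:
# [[a, b], c] produces (a && b) || c.
if __name__ == '__main__':
    _type_is_dataset = PropertyIsEqualTo('dc:type', 'dataset')
    _mentions_water = PropertyIsLike('csw:AnyText', '%water%')
    _in_bbox = BBox([-10, 40, 10, 60])
    _request = FilterRequest()
    _root = _request.setConstraintList([[_type_is_dataset, _mentions_water], _in_bbox])
    print(etree.tostring(_root))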
| gpl-2.0 |
rbrito/pkg-youtube-dl | youtube_dl/extractor/airmozilla.py | 57 | 2697 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
)
class AirMozillaIE(InfoExtractor):
_VALID_URL = r'https?://air\.mozilla\.org/(?P<id>[0-9a-z-]+)/?'
_TEST = {
'url': 'https://air.mozilla.org/privacy-lab-a-meetup-for-privacy-minded-people-in-san-francisco/',
'md5': '8d02f53ee39cf006009180e21df1f3ba',
'info_dict': {
'id': '6x4q2w',
'ext': 'mp4',
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
'thumbnail': r're:https?://.*/poster\.jpg',
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
'timestamp': 1422487800,
'upload_date': '20150128',
'location': 'SFO Commons',
'duration': 3780,
'view_count': int,
'categories': ['Main', 'Privacy'],
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._html_search_regex(r'//vid\.ly/(.*?)/embed', webpage, 'id')
embed_script = self._download_webpage('https://vid.ly/{0}/embed'.format(video_id), video_id)
jwconfig = self._parse_json(self._search_regex(
r'initCallback\((.*)\);', embed_script, 'metadata'), video_id)['config']
info_dict = self._parse_jwplayer_data(jwconfig, video_id)
view_count = int_or_none(self._html_search_regex(
r'Views since archived: ([0-9]+)',
webpage, 'view count', fatal=False))
timestamp = parse_iso8601(self._html_search_regex(
r'<time datetime="(.*?)"', webpage, 'timestamp', fatal=False))
duration = parse_duration(self._search_regex(
r'Duration:\s*(\d+\s*hours?\s*\d+\s*minutes?)',
webpage, 'duration', fatal=False))
info_dict.update({
'id': video_id,
'title': self._og_search_title(webpage),
'url': self._og_search_url(webpage),
'display_id': display_id,
'description': self._og_search_description(webpage),
'timestamp': timestamp,
'location': self._html_search_regex(r'Location: (.*)', webpage, 'location', default=None),
'duration': duration,
'view_count': view_count,
'categories': re.findall(r'<a href=".*?" class="channel">(.*?)</a>', webpage),
})
return info_dict
| unlicense |
nlharris/narrative | src/biokbase/userandjobstate/client.py | 4 | 45124 | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
# Passes on URLError, timeout, and BadStatusLine exceptions.
# See:
# http://docs.python.org/2/library/urllib2.html
# http://docs.python.org/2/library/httplib.html
#
############################################################
try:
import json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as json
import urllib2
import httplib
import urlparse
import random
import base64
import httplib2
from urllib2 import URLError, HTTPError
from ConfigParser import ConfigParser
import os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
# This is bandaid helper function until we get a full
# KBase python auth client released
h = httplib2.Http(disable_ssl_certificate_validation=True)
auth = base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
h.add_credentials(user_id, password)
h.follow_all_redirects = True
url = auth_svc
resp, content = h.request(url, 'GET', headers=headers)
status = int(resp['status'])
if status >= 200 and status <= 299:
tok = json.loads(content)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination %s:%s' % (user_id, password))
else:
raise Exception(str(resp))
return tok['access_token']
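# Usage sketch ('someuser'/'somepassword' are placeholders; the Globus Nexus
# endpoint above is a legacy service and may no longer be reachable):
#   token = _get_token('someuser', 'somepassword')
#   ujs = UserAndJobState(token=token)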
def _read_rcfile(file=os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if os.path.exists(file):
try:
with open(file) as authrc:
rawdata = json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if os.path.exists(file):
try:
config = ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in
('user_id', 'token', 'client_secret',
'keyfile', 'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
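# For reference, _read_inifile expects a file shaped like this hypothetical
# example (only the keys listed above are kept):
#   [authentication]
#   user_id = someuser
#   token = un=someuser|tokenid=...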
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return json.JSONEncoder.default(self, obj)
class UserAndJobState(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False):
if url is None:
url = 'https://kbase.us/services/userandjobstate/'
scheme, _, _, _, _, _ = urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in os.environ:
self._headers['AUTHORIZATION'] = os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
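    # Construction sketch (the token below is a placeholder): credentials are
    # resolved in the order implemented above - explicit token, then
    # user_id/password, then $KB_AUTH_TOKEN, then ~/.kbase_config / ~/.authrc:
    #   ujs = UserAndJobState('https://kbase.us/services/userandjobstate/',
    #                         token='un=someuser|tokenid=...')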
def ver(self):
arg_hash = {'method': 'UserAndJobState.ver',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def set_state(self, service, key, value):
arg_hash = {'method': 'UserAndJobState.set_state',
'params': [service, key, value],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def set_state_auth(self, token, key, value):
arg_hash = {'method': 'UserAndJobState.set_state_auth',
'params': [token, key, value],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_state(self, service, key, auth):
arg_hash = {'method': 'UserAndJobState.get_state',
'params': [service, key, auth],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def has_state(self, service, key, auth):
arg_hash = {'method': 'UserAndJobState.has_state',
'params': [service, key, auth],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_has_state(self, service, key, auth):
arg_hash = {'method': 'UserAndJobState.get_has_state',
'params': [service, key, auth],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result']
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def remove_state(self, service, key):
arg_hash = {'method': 'UserAndJobState.remove_state',
'params': [service, key],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def remove_state_auth(self, token, key):
arg_hash = {'method': 'UserAndJobState.remove_state_auth',
'params': [token, key],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def list_state(self, service, auth):
arg_hash = {'method': 'UserAndJobState.list_state',
'params': [service, auth],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def list_state_services(self, auth):
arg_hash = {'method': 'UserAndJobState.list_state_services',
'params': [auth],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def create_job(self):
arg_hash = {'method': 'UserAndJobState.create_job',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def start_job(self, job, token, status, desc, progress, est_complete):
arg_hash = {'method': 'UserAndJobState.start_job',
'params': [job, token, status, desc, progress, est_complete],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def create_and_start_job(self, token, status, desc, progress, est_complete):
arg_hash = {'method': 'UserAndJobState.create_and_start_job',
'params': [token, status, desc, progress, est_complete],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def update_job_progress(self, job, token, status, prog, est_complete):
arg_hash = {'method': 'UserAndJobState.update_job_progress',
'params': [job, token, status, prog, est_complete],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def update_job(self, job, token, status, est_complete):
arg_hash = {'method': 'UserAndJobState.update_job',
'params': [job, token, status, est_complete],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_job_description(self, job):
arg_hash = {'method': 'UserAndJobState.get_job_description',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result']
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_job_status(self, job):
arg_hash = {'method': 'UserAndJobState.get_job_status',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result']
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def complete_job(self, job, token, status, error, res):
arg_hash = {'method': 'UserAndJobState.complete_job',
'params': [job, token, status, error, res],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_results(self, job):
arg_hash = {'method': 'UserAndJobState.get_results',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_detailed_error(self, job):
arg_hash = {'method': 'UserAndJobState.get_detailed_error',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_job_info(self, job):
arg_hash = {'method': 'UserAndJobState.get_job_info',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def list_jobs(self, services, filter):
arg_hash = {'method': 'UserAndJobState.list_jobs',
'params': [services, filter],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def list_job_services(self):
arg_hash = {'method': 'UserAndJobState.list_job_services',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def share_job(self, job, users):
arg_hash = {'method': 'UserAndJobState.share_job',
'params': [job, users],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def unshare_job(self, job, users):
arg_hash = {'method': 'UserAndJobState.unshare_job',
'params': [job, users],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_job_owner(self, job):
arg_hash = {'method': 'UserAndJobState.get_job_owner',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_job_shared(self, job):
arg_hash = {'method': 'UserAndJobState.get_job_shared',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def delete_job(self, job):
arg_hash = {'method': 'UserAndJobState.delete_job',
'params': [job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def force_delete_job(self, token, job):
arg_hash = {'method': 'UserAndJobState.force_delete_job',
'params': [token, job],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
| mit |
sparkslabs/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Support/Data/Repository.py | 3 | 46255 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===========================================
Kamaelia component repository introspection
===========================================
This support code scans through a Kamaelia installation detecting components and
picking up relevant information such as doc strings, initializer arguments and
the declared Inboxes and Outboxes.
It not only detects components and prefabs, but also picks up modules, classes
and functions - making this a good source for documentation generation.
Example Usage
-------------
Simple lists of component/prefab names
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fetch a flat listing of all components. The key is the module path (as a tuple)
and the value is a list of the names of the components found::
>>> r=Repository.GetAllKamaeliaComponents()
>>> r[('Kamaelia','Util','Console')]
['ConsoleEchoer', 'ConsoleReader']
Fetch a *nested* listing of all components. The leaf is a list of entity names::
>>> r=Repository.GetAllKamaeliaComponentsNested()
>>> r['Kamaelia']['Util']['Console']
['ConsoleEchoer', 'ConsoleReader']
Fetch a flat listing of all prefabs::
>>> p=Repository.GetAllKamaeliaPrefabs()
>>> p[('Kamaelia','File','Reading')]
['RateControlledFileReader', 'RateControlledReusableFileReader',
'ReusableFileReader', 'FixedRateControlledReusableFileReader']
Fetch a *nested* listing of all prefabs::
>>> p=Repository.GetAllKamaeliaPrefabsNested()
>>> p['Kamaelia']['File']['Reading']
['RateControlledFileReader', 'RateControlledReusableFileReader',
'ReusableFileReader', 'FixedRateControlledReusableFileReader']
Fetching a flat listing of components as defined in a specific directory (rather
than the current Kamaelia installation)::
>>> r=Repository.GetAllKamaeliaComponents(baseDir="/data/my-projects/my-components/")
Detailed introspections::
~~~~~~~~~~~~~~~~~~~~~~~~~
We can ask for a complete introspection of the current Kamaelia installation::
>>> docTree=Repository.ModuleDoc("Kamaelia","/usr/lib/python/site-packages/Kamaelia")
>>> docTree.resolve(roots={"Kamaelia":docTree})
And look up a particular module::
>>> m=docTree.find("Util.Console")
>>> m
<Repository.ModuleDoc object at 0x40403b0c>
Then find components declared in that module::
>>> cs=m.listAllComponents()
>>> cs
[('ConsoleReader', <Repository.ClassScope object at 0x41511bac>), ('ConsoleEchoer', <Repository.ClassScope object at 0x4115990c>)]
>>> (name,c)=cs[0]
>>> name
'ConsoleReader'
>>> c
<Repository.ClassScope object at 0x41511bac>
And look at properties of that component::
>>> c.module
'Kamaelia.Util.Console'
>>> c.inboxes
{'control': 'NOT USED', 'inbox': 'NOT USED'}
>>> c.outboxes
{'outbox': 'Lines that were typed at the console', 'signal': 'NOT USED'}
>>> print c.doc
ConsoleReader([prompt][,eol]) -> new ConsoleReader component.
Component that provides a console for typing in stuff. Each line is output
from the "outbox" outbox one at a time.
Keyword arguments:
- prompt -- Command prompt (default=">>> ")
- eol -- End of line character(s) to put on end of every line outputted (default is newline)
This includes methods defined in it::
>>> c.listAllFunctions()
[('main', <Repository.FunctionScope object at 0x4166822c>), ('__init__', <Repository.FunctionScope object at 0x4166224c>)]
>>> name,f=c.listAllFunctions()[1]
>>> name
'__init__'
>>> f
<Repository.FunctionScope object at 0x4166224c>
We can look at the docs for the function:
>>> f.doc
'x.__init__(...) initializes x; see x.__class__.__doc__ for signature'
We can ask for a string summarising the method's arguments::
>>> f.argString
'self[, prompt][, eol]'
Or a list naming each argument, consisting of (argname, summary-representation)
pairs::
>>> f.args
[('self', 'self'), ('prompt', '[prompt]'), ('eol', '[eol]')]
Obtaining introspection data
----------------------------
To get a detailed introspection you create a ModuleDoc object. You can
either point it at a specific directory, or just let it introspect the currently
installed Kamaelia repository.
You can specify the module path corresponding to that directory (the "root
name"). The default is simply "Kamaelia". If for example, you point it at the
Kamaelia.Chassis directory; you should explain that the root name is
"Kamaelia.Chassis". Or if, for example, you are using this code to document
Axon, you would specify a root name of "Axon".
After instantiating your ModuleDoc object; remember to call its "resolve" method
to allow it to resolve references to base classes, and determine method the
resolution order for classes.
How are components and prefabs detected?
----------------------------------------
Components and prefabs are detected in sourcefiles by looking for declarations
of an __kamaelia_components__ and __kamaelia_prefabs__ variables, for example::
__kamaelia_components__ = [ "IcecastClient", "IcecastDemux", "IcecastStreamWriter" ]
__kamaelia_prefabs__ = [ "IcecastStreamRemoveMetadata" ]
They should be declared individually, at module level, and should consist of a
simple list of strings giving the names of the components/prefabs present.
Structure of detailed introspections
------------------------------------
The introspection is a hierarchy of Scope objects, each representing a delcaration
scope - such as a module, class, function, etc. These are built up to reflect
the structure of the library if it is imported.
* ModuleDoc objects represent each module. They may contain:
* Other ModuleDoc objects
* ImportScope objects
* ClassScope objects (representing classes and components)
* FunctionScope objects (repesenting functions and prefabs)
* UnparsedScope objects (anything that wasn't parsed)
ClassScope and FunctionScope objects may also contain any of these. For example,
methods in a class will be represented as FunctionScope objects within the
ClassScope object.
The find() method of any given scope can be used to lookup a symbol in that scope,
or its children. For example, you could call find() on the "Kamaelia.Chassis"
ModuleDoc object with the argument "Graphline.Graphline" to retrieve the graphline
component (its full path is "Kamaelia.Chassis.Graphline.Graphline")
The listAllXXXXX() methods enumerate items - such as classes, functions,
components, prefabs or modules.
Implementation Details
----------------------
This code uses the python compiler.ast module to parse the source of python
files, rather than import them. This allows introspection of code that might not
necessarily run on the system at hand - perhaps because not all dependancies can
be satisfied.
Basic tracking of assignment operations is performed, so the following is fair
game::
from Axon.Component import component as flurble
class Boo(flurble):
pass
Foo=Boo
However anything more comple is not processed. For example, functions and
classes declared within "if" statement will not be found::
if 1:
class WillNotBeDetected:
pass
def AlsoWillNotBeDetected():
pass
The simplified functions that only return lists of component/prefab names (
GetAllKamaeliaComponentsNested, GetAllKamaeliaComponents,
GetAllKamaeliaPrefabsNested and GetAllKamaeliaPrefabs) simply run the full
introspection of the codebase but then throw most of the information away.
"""
import os
import sys
import compiler
from compiler import ast
import __builtin__ as BUILTINS
from os.path import isdir
from os.path import isfile
from os.path import exists
from os.path import join as pathjoin
class Scope(object):
"""\
Representation of a declaration scope - could be a module, class, function, import, etc.
Basically something that might contain other symbols.
"""
def __init__(self, type="Module", ASTChildren=None, imports=None, localModules={}, rootScope=None):
"""\
Arguments:
- type -- Descriptive name saying what kind of scope this is.
- ASTChildren -- List of AST nodes for whatever is within this scope. Will be parsed.
- imports -- Scope acting as a container/root for tracking any imports. Should be shared between all Scope objects.
- localModules -- Dict mapping module names that might be present in the same lexical level as this module, or deeper; mapping them to their full module names. Eg. for Axon.Component this might contain a mapping for "AxonException" to "Axon.AxonException"
- rootScope -- Scope object for current lexical root - eg. the Module containing this scope.
"""
super(Scope,self).__init__()
self.symbols={}
self.type=type
if imports is not None:
self.imports=imports
else:
self.imports=ImportScope("")
self.localModules=localModules
if rootScope is not None:
self.rootScope=rootScope
else:
self.rootScope=self
if ASTChildren is None or ASTChildren==[]:
return
# parse the AST
try:
ASTChildren=ASTChildren.getChildNodes() # python 2.3 complains if you try to iterate over the node itself
except:
pass
for node in ASTChildren:
if isinstance(node, ast.From):
self._parse_From(node) # parse "from ... import"s to recognise what symbols are mapped to what imported things
elif isinstance(node, ast.Import):
self._parse_Import(node) # parse resolvesTo to recognise what symbols are mapped to what imported things
elif isinstance(node, ast.Class):
self._parse_Class(node) # classes need to be parsed so we can work out base classes
elif isinstance(node, ast.Function):
self._parse_Function(node)
elif isinstance(node, ast.Assign):
self._parse_Assign(node) # parse assignments that map stuff thats been imported to new names
elif isinstance(node, ast.AugAssign):
pass # definitely ignore these
else:
pass # ignore everything else for the moment
return
def _parse_From(self,node):
"""Parse a 'from ... import' AST node."""
sourceModule = node.modname
for (name, destName) in node.names:
# check if this is actually a local module
if sourceModule in self.localModules:
sourceModule=self.localModules[sourceModule]
mapsTo = ".".join([sourceModule,name])
if destName is None:
destName = name
theImport=self.imports.find(mapsTo)
self.assign(destName, theImport)
def _parse_Import(self, node):
"""Parse an import AST node."""
for (name,destName) in node.names:
# if module being referred to is in the local directory, map to the full pathname
if name in self.localModules:
fullname = self.localModules[name]
else:
fullname = name
# force creation of the import, by looking for it (ImportScope objects do this)
theImport=self.imports.find(fullname)
# is it being imported as a particular name, or just as itself? (eg. import Axon.Component as Flurble)
if destName is None:
# work out the path to the root of the entity being imported (eg. "Axon.Component" for "import Component.component")
fullnamesplit = fullname.split(".")
namesplit=name.split(".")
assert(namesplit==fullnamesplit[-len(namesplit):])
head=fullnamesplit[:len(fullnamesplit)-len(namesplit)+1]
theImport=self.imports.find(".".join(head))
self.assign(namesplit[0],theImport)
else:
self.assign(destName, theImport)
def _parse_Class(self, node):
"""Parse a class statement AST node"""
self.assign(node.name, ClassScope(node,self.imports,self.localModules,self.rootScope,self))
def _parse_Function(self, node):
"""Parse a function 'def' statement AST node"""
self.assign(node.name, FunctionScope(node,self.imports,self.localModules,self.rootScope))
def _parse_Assign(self, node):
for target in node.nodes:
# for each assignment target, go clamber through mapping against the assignment expression
# we'll only properly parse things with a direct 1:1 mapping
# if, for example, the assignment relies on understanding the value being assigned, eg. (a,b) = c
# then we'll silently fail
assignments = self._mapAssign(target,node.expr)
resolvedAssignments = []
for (target,expr) in assignments:
if isinstance(expr,str):
try:
resolved = self.find(expr)
except ValueError:
resolved = UnparsedScope(ast.Name(expr),self.imports,self.localModules,self.rootScope)
else:
resolved = UnparsedScope(expr,self.imports,self.localModules,self.rootScope)
resolvedAssignments.append((target,resolved))
for (target,expr) in resolvedAssignments:
self.assign(target,expr)
def _mapAssign(self, target, expr):
"""\
Correlate each term on the lhs to the respective term on the rhs of the assignment.
Return a list of pairs (lhs, rhs) not yet resolved - just the names
"""
assignments = []
if isinstance(target, ast.AssName):
targetname = self._parse_Name(target)
if isinstance(expr, (ast.Name, ast.Getattr)):
assignments.append( (targetname, self._parse_Name(expr)) )
else:
assignments.append( (targetname, expr) )
elif isinstance(target, (ast.AssTuple, ast.AssList)):
if isinstance(expr, (ast.Tuple, ast.List)):
targets = target.nodes
exprs = expr.nodes
if len(targets)==len(exprs):
for i in range(0,len(targets)):
assignments.extend(self._mapAssign(targets[i],exprs[i]))
else:
# lengths differ - map each target name onto the whole rhs expression
for i in range(0,len(targets)):
assignments.append( (self._parse_Name(targets[i]), expr) )
else:
pass # dont know what to do with this term on the lhs of the assignment
else:
pass # dont know what to do with this term on the lhs of the assignment
return assignments
def _parse_Name(self,node):
"""Parse a name AST node (some combination of Name/AssignName/GetAttr nodes)"""
if isinstance(node, (ast.Name, ast.AssName)):
return node.name
elif isinstance(node, (ast.Getattr, ast.AssAttr)):
return ".".join([self._parse_Name(node.expr), node.attrname])
else:
return ""
def resolveName(self,provisionalName):
"""\
Returns the name you suggest this module should have; or a different one
if this module feels it knows better :-)
Used by ImportScopes to explain that although they may have been imported into
one place, they are actually from somewhere else.
"""
return provisionalName
def find(self, name, checkRoot=True):
"""\
Find a given named symbol and return the scope object representing it.
Returns the found scope object, or raises ValueError if none can be found.
This operation recurses automatically to subscopes.
Arguments:
- name -- the path name of the thing to find below here.
- checkRoot -- Optional. If it isn't found here, check the root scope too? (default=True)
"""
segmented=name.split(".")
head=segmented[0]
tail=".".join(segmented[1:])
if head in self.symbols:
found=self.symbols[head]
if tail=="":
return found
else:
return found.find(tail,checkRoot=False)
else:
if checkRoot and self.rootScope != self:
return self.rootScope.find(name,checkRoot=False)
raise ValueError("Cannot find it!")
def locate(self,value):
"""\
Find where a given scope object is. Returns the pathname leading up to it,
or raises ValueError if it couldn't be found.
Effectively the reverse of the find() operation.
Example::
>>> myScope.locate(subSubScopeObject)
'A.B.C'
Arguments:
- value -- The scope object to locate.
"""
for symbol in self.symbols:
if value==self.symbols[symbol]:
return symbol
for symbol in self.symbols:
try:
return symbol+"."+self.symbols[symbol].locate(value)
except ValueError:
pass
raise ValueError("Can't locate it!")
def assign(self, name, value, checkRoot=True):
"""\
Sets a given named symbol to be the supplied value. The name can be a
path (dot separated), in which case it will be followed through to assign
the symbol at the end of the path.
ValueError will be raised if the path doesn't exist.
Example::
>>> myScope.assign("Flurble", Scope(...))
>>> myScope.assign("Flurble.Plig", Scope(..))
Arguments:
- name -- the path name of the thing to set
- value -- the object to assign as that name (eg. a scope object)
- checkRoot -- Optional. Check the root scope too? (default=True)
"""
segmented=name.split(".")
head=segmented[0]
tail=".".join(segmented[1:])
if tail=="":
self.symbols[head]=value
else:
if head in self.symbols:
self.symbols[head].assign(tail,value,checkRoot=False)
else:
if checkRoot and self.rootScope != self:
return self.rootScope.assign(name,value,checkRoot=False)
raise ValueError("Cannot assign to this!")
def listAllClasses(self,**options):
return self.listAllMatching(ClassScope,**options)
def listAllFunctions(self,**options):
return self.listAllMatching(FunctionScope,**options)
def listAllModules(self,**options):
return self.listAllMatching(ModuleScope,**options)
def listAllNonImports(self,**options):
return self.listAllNotMatching((ImportScope,ModuleScope),**options)
def listAllMatching(self,types, noRecurseTypes=None, recurseDepth=0):
"""\
Returns a list of (pathName, object) pairs for all children of the
specified type. Will recurse as deeply as you specify. You can also block
it from recursing into certain scope types. By default, recursion stops
at ModuleScope objects.
Arguments:
- types -- tuple of classes that may be returned
- noRecurseTypes -- tuple of classes that will *not* be recursed into (default: ModuleScope)
- recurseDepth -- Optional. Maximum recursion depth (default=0)
"""
if noRecurseTypes is None:
noRecurseTypes=(ModuleScope,)
found=[]
for symbol in self.symbols:
item=self.symbols[symbol]
if isinstance(item,types):
found.append((symbol,item))
if recurseDepth>0 and not isinstance(item,noRecurseTypes):
subfound=item.listAllMatching(types,noRecurseTypes,recurseDepth-1)
for (name,thing) in subfound:
found.append((symbol+"."+name,thing))
return found
def listAllNotMatching(self,types, noRecurseTypes=None, recurseDepth=0):
"""\
Returns a list of (pathName, object) pairs for all children *not* matching the
specified type. Will recurse as deeply as you specify. You can also block
it from recursing into certain scope types. By default, recursion stops
at ModuleScope objects.
Arguments:
- types -- tuple of classes that will *not* be returned
- noRecurseTypes -- tuple of classes that will *not* be recursed into (default: ModuleScope)
- recurseDepth -- Optional. Maximum recursion depth (default=0)
"""
if noRecurseTypes is None:
noRecurseTypes=(ModuleScope,)
found=[]
for symbol in self.symbols:
item=self.symbols[symbol]
if not isinstance(item,types):
found.append((symbol,item))
if recurseDepth>0 and not isinstance(item,noRecurseTypes):
subfound=item.listAllNotMatching(types,noRecurseTypes,recurseDepth-1)
for (name,thing) in subfound:
found.append((symbol+"."+name,thing))
return found
def resolve(self,_resolvePass=None,roots={}):
"""\
Post-processing step for resolving imports, base classes etc.
Call this method after you have finished instantiating
your whole tree of Scope objects.
Arguments:
- _resolvePass -- For internal use. Don't specify when calling manually.
- roots -- dict of master root scope objects, keyed by module name - eg. the object representing the top level "Axon" or "Kamaelia" module.
"""
if _resolvePass is None:
self.resolve(_resolvePass=1,roots=roots)
self.resolve(_resolvePass=2,roots=roots)
else:
for (name,item) in self.symbols.items():
try:
item.resolve(_resolvePass=_resolvePass,roots=roots)
except AttributeError:
# item doesn't have a 'resolve' method
pass
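# Illustrative sketch (not part of the original module; the _demo_* name is
# hypothetical): Scope symbol tables nest, so a dotted path can be assigned,
# then found again with find() and reverse-mapped with locate().
def _demo_scope():
    root = Scope()
    root.assign("Flurble", Scope(rootScope=root))
    plig = Scope(rootScope=root)
    root.assign("Flurble.Plig", plig)
    assert root.find("Flurble.Plig") is plig
    assert root.locate(plig) == "Flurble.Plig"
    return root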
class ModuleScope(Scope):
"""\
Scope object representing module scopes.
"""
def __init__(self, AST, localModules={}):
super(ModuleScope,self).__init__("Module",AST.node.nodes,None,localModules,None)
self.ast=AST
if AST.doc is not None:
self.doc = AST.doc
else:
self.doc = ""
class ClassScope(Scope):
"""\
Scope object representing class scopes.
Determines what its base classes are, and the method resolution order. A list
of (name,baseScopeObject) pairs is placed in self.bases. A list of scope objects
is placed into self.allBasesInMethodResolutionOrder.
Bases will be a mixture of ClassScope and ImportScope objects.
These lists won't be properly set until the resolve() post-processing method
has been called.
Sets the following attributes:
- doc -- class's doc string, or the empty string if none.
- bases -- list of (name,scope object) pairs describing the class's bases
- allBasesInMethodResolutionOrder -- list of scope objects for the bases in method resolution order
- ast -- the AST for this
"""
def __init__(self, AST, imports, localModules, rootScope, parentScope):
super(ClassScope,self).__init__("Class",AST.code,imports,localModules,rootScope)
self.ast=AST
if AST.doc is not None:
self.doc = AST.doc
else:
self.doc = ""
# parse bases
self.bases = []
for baseName in AST.bases:
parsedBaseName=self._parse_Name(baseName)
try:
base=parentScope.find(parsedBaseName)
resolvedBaseName = base.resolveName(parsedBaseName)
except ValueError:
base=None
resolvedBaseName = parsedBaseName
self.bases.append((resolvedBaseName,base))
def resolve(self,_resolvePass=None,roots={}):
"""\
Resolve pass 1:
* resolves bases, where possible, to ClassScope objects - eg. checking if
imports actually refer to stuff in this tree of scope objects, and
dereferencing them.
Resolve pass 2:
* determines the method resolution order
"""
super(ClassScope,self).resolve(_resolvePass,roots)
if _resolvePass==1 and len(roots):
# resolve bases that are imports that could actually be classes in one of the root hierarchies
newBases = []
for baseName,base in self.bases:
history=[]
baseNameFrags = baseName.split(".")
# chase through the (chain of) imports to see if we can find them
# in the documentation object tree roots provided
while isinstance(base,ImportScope) or base is None:
history.append(baseName)
success=False
for rootName,rootMod in roots.items():
rootNameFrags=rootName.split(".")
head=baseNameFrags[:len(rootNameFrags)]
tail=baseNameFrags[len(rootNameFrags):]
if rootNameFrags == head:
try:
base=rootMod.find(".".join(tail))
success=True
except ValueError:
continue
if baseName in history:
continue
if not success:
# ok, hit a dead end
break
if baseName in history:
# ok, we've gone circular
break
newBases.append((baseName,base))
self.bases=newBases
elif _resolvePass==2:
# now determine the method resolution order
self.allBasesInMethodResolutionOrder = _determineMRO(self)
super(ClassScope,self).resolve(_resolvePass,roots)
def _determineMRO(klass):
"""\
Pass a ClassScope object representing a class, and this function returns a
list of scope objects representing the class and its base classes in method
resolution order.
This function applies the C3 algorithm, as used by python, to determine the
method resolution order.
"""
order=[klass]
if not isinstance(klass,ClassScope):
return order
bases=[]
for baseName,base in klass.bases:
bases.append(base)
mergedBases = [_determineMRO(base) for base in bases]
mergedBases.extend([[base] for base in bases])
while len(mergedBases) > 0:
for baselist in mergedBases:
head = baselist[0]
foundElsewhere = [True for merged in mergedBases if (head in merged[1:])]
if foundElsewhere == []:
order.append(head)
for baselist in mergedBases:
if baselist[0]==head:
del baselist[0]
mergedBases = [baselist for baselist in mergedBases if baselist != []]
break
if foundElsewhere:
raise RuntimeError("Inconsistent base class hierarchy: no valid method resolution order")
return order
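# Illustrative sketch (not part of the original module; the _demo_* name and
# class names are hypothetical): applying the C3 linearisation above to a
# classic diamond hierarchy parsed with the py2 compiler package.
def _demo_mro():
    src = "class A: pass\nclass B(A): pass\nclass C(A): pass\nclass D(B,C): pass\n"
    scope = Scope("Module", compiler.parse(src).node)
    order = _determineMRO(scope.find("D"))
    assert [scope.locate(klass) for klass in order] == ["D", "B", "C", "A"]
    return order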
class FunctionScope(Scope):
"""\
Scope object representing a declared function.
Sets the following attributes:
- doc -- function's doc string, or the empty string if none.
- argString -- string describing the arguments this method takes
- args -- list of (name, annotatedName) tuples representing, in order, the arguments of the method
- ast -- the AST for this
"""
def __init__(self, AST, imports, localModules, rootScope):
super(FunctionScope,self).__init__("Class",None,imports,localModules,rootScope) # don't bother parsing function innards
self.ast=AST
if AST.doc is not None:
self.doc = AST.doc
else:
self.doc = ""
# parse arguments
argNames = [(str(argName),str(argName)) for argName in AST.argnames]
i=-1
numVar = AST.varargs or 0
numKW = AST.kwargs or 0
for j in range(numKW):
argNames[i] = ( argNames[i][0], "**"+argNames[i][1] )
i-=1
for j in range(numVar):
argNames[i] = ( argNames[i][0], "*"+argNames[i][1] )
i-=1
for j in range(len(AST.defaults)-numVar-numKW):
argNames[i] = ( argNames[i][0], "["+argNames[i][1]+"]" )
i-=1
argStr = ", ".join([arg for (_, arg) in argNames])
argStr = argStr.replace(", [", "[, ")
self.args = argNames
self.argString = argStr
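# Illustrative sketch (not part of the original module; the _demo_* name is
# hypothetical): arguments with defaults are bracketed, and the ", [" -> "[, "
# rewrite above nests the brackets in the conventional optional-argument style.
def _demo_functionScope():
    mod = compiler.parse("def f(a, b=1, c=2): pass")
    fscope = FunctionScope(mod.node.nodes[0], ImportScope(""), {}, None)
    assert fscope.argString == "a[, b][, c]"
    return fscope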
class ImportScope(Scope):
"""\
Scope object representing an import.
Sets the following attributes:
- doc -- empty string
- importPathName -- the full import path name leading to this entity, eg. "Axon.Component"
"""
def __init__(self,importPathName="",imports=None):
if importPathName=="" and imports==None:
imports=self
super(ImportScope,self).__init__("Module",None,imports,[],None) # its an opaque imported module, no local modules, etc to concern ourselves with
self.doc = ""
self.importPathName=importPathName
def resolveName(self,provisionalName):
"""Returns the full (real) path name of this import"""
return self.importPathName
def find(self,name,checkRoot=False):
# we assume the symbol exists(!), so if it is referenced, we create a placeholder for it (if one doesn't already exist)
# shouldn't check in root scope of this parsing, since, as an import, this *is* the new root (it's a new module)
checkRoot=False
segmented=name.split(".")
head=segmented[0]
tail=".".join(segmented[1:])
if head not in self.symbols:
if self.importPathName:
fullname=self.importPathName+"."+head
else:
fullname=head
self.assign(head, ImportScope(fullname,self.imports))
found=self.symbols[head]
if tail=="":
return found
else:
return found.find(tail,checkRoot=False)
def assign(self, name, value, checkRoot=False):
# we assume the symbol exists(!), so if it is referenced, we create a placeholder for it (if one doesn't already exist)
checkRoot=False
segmented=name.split(".")
head=segmented[0]
tail=".".join(segmented[1:])
if tail=="":
self.symbols[head]=value
else:
if head not in self.symbols:
if self.importPathName:
fullname=self.importPathName+"."+head
else:
fullname=head
self.assign(head, ImportScope(fullname,self.imports))
self.symbols[head].assign(tail,value,checkRoot=False)
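# Illustrative sketch (not part of the original module; the _demo_* name is
# hypothetical): ImportScope conjures placeholders on demand, so merely
# looking up a dotted path creates the whole chain of import scopes.
def _demo_importScope():
    imports = ImportScope("")
    leaf = imports.find("Axon.Component.component")
    assert leaf.importPathName == "Axon.Component.component"
    assert leaf.resolveName("anything") == "Axon.Component.component"
    return imports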
class UnparsedScope(Scope):
"""\
Scope object representing something that wasn't parsed - eg. a symbol
that refers to something that isn't a simple class, function etc.
Sets the following attributes:
- doc -- empty string
- ast -- the AST tree for this unparsed entity
"""
def __init__(self, AST, imports, localModules, rootScope):
super(UnparsedScope,self).__init__("Unparsed",AST,imports,localModules,rootScope)
self.doc=""
self.ast=AST
class ModuleDoc(ModuleScope):
def __init__(self, moduleName, filePath, localModules={}):
"""\
Arguments:
- moduleName -- the full module pathname for this module
- filePath -- the full filepath of this module or this subdirectory
- localModules -- dictionary mapping localmodule pathnames to the global namespace; eg. Chassis -> Kamaelia.Chassis
"""
self.ignoreFilenames=[".svn","__init__.py"]
if isdir(filePath):
subModules,localModules,AST = self.scanSubdirs(filePath,moduleName)
else:
subModules = {}
AST = self.scanSelfOnly(filePath)
# now we've already done children and have built up localModule name mappings
# we can initialise ourselves properly (parsing the AST)
print "Parsing:",moduleName
super(ModuleDoc,self).__init__(AST,localModules)
self.localModules = localModules # just to be safe
# add "module" attribute to ourselves
self.module = moduleName
# go identify __kamaelia_components__ and __kamaelia_prefabs__ and refashion them
self.identifyComponentsAndPrefabs()
self.augmentComponentsAndPrefabs()
# add "module" attribute to all our non import children too
for (symbol,item) in self.listAllNonImports():
item.module = moduleName
# merge subModules into self.symbols
for name in subModules:
self.assign(name, subModules[name])
def scanSubdirs(self, filePath,moduleName):
subModules={}
# try to ingest __init__.py
filename=pathjoin(filePath,"__init__.py")
if exists(filename):
AST=compiler.parseFile(filename)
else:
AST=compiler.parse("")
subdirs = [name for name in os.listdir(filePath) if isdir(pathjoin(filePath,name)) and name not in self.ignoreFilenames]
sourcefiles = [name for name in os.listdir(filePath) if name not in subdirs and name[-3:]==".py" and name not in self.ignoreFilenames]
localModules={} # we're a subdirectory, ignore what we were passed
# recurse through directory contents, doing subdirectories first
# ignore localModules we were passed; and build our own as the localModules of all children
for subDir in subdirs:
subModName=moduleName+"."+subDir
subMod = ModuleDoc(subModName, pathjoin(filePath,subDir))
subModules[subDir] = subMod
# merge the subdir's local modules into our own local modules
for key in subMod.localModules:
localModules[subDir+"."+key] = subMod.localModules[key]
# add localstuff to localModules too
for file in sourcefiles:
modName=file[:-3] # strip ".py"
localModules[modName] = moduleName+"."+modName
# now go through other module files in this directory with us
for file in sourcefiles:
modName=file[:-3]
mod = ModuleDoc(moduleName+"."+modName, pathjoin(filePath,file), localModules)
subModules[modName] = mod
return subModules,localModules,AST
def scanSelfOnly(self,filePath):
# ingest file as it stands
assert(exists(filePath))
assert(isfile(filePath))
AST=compiler.parseFile(filePath)
return AST
def identifyComponentsAndPrefabs(self):
try:
components = self.find("__kamaelia_components__")
components = _stringsInList(components.ast)
except (ValueError,TypeError):
components = []
try:
prefabs = self.find("__kamaelia_prefabs__")
prefabs = _stringsInList(prefabs.ast)
except (ValueError,TypeError):
prefabs = []
self.components = components
self.prefabs = prefabs
def augmentComponentsAndPrefabs(self):
# parse Inbox/Outbox declarations for components
for name,component in self.listAllComponents():
component.isComponent=True
try:
inboxes = component.find("Inboxes")
component.inboxes = _parseBoxes(inboxes.ast)
except ValueError:
component.inboxes = []
try:
outboxes = component.find("Outboxes")
component.outboxes = _parseBoxes(outboxes.ast)
except ValueError:
component.outboxes = []
# nothing much to do for prefabs
for name,prefab in self.listAllPrefabs():
prefab.isPrefab=True
def listAllComponents(self,**options):
return [ (name,cls) for (name,cls) in self.listAllClasses(**options) if name in self.components ]
def listAllPrefabs(self,**options):
return [ (name,fnc) for (name,fnc) in self.listAllFunctions(**options) if name in self.prefabs ]
def listAllComponentsAndPrefabs(self,**options):
return self.listAllComponents(**options) + self.listAllPrefabs(**options)
def listAllModulesIncSubModules(self):
modules = [(self.module, self)]
for (_,m) in self.listAllModules(recurseDepth=0):
modules.extend(m.listAllModulesIncSubModules())
return modules
def listAllComponentsIncSubModules(self):
components = [(self.module+"."+name, item) for (name,item) in self.listAllComponents(recurseDepth=5)]
for (_,m) in self.listAllModules(recurseDepth=0):
components.extend(m.listAllComponentsIncSubModules())
return components
def listAllPrefabsIncSubModules(self):
prefabs = [(self.module+"."+name, item) for (name,item) in self.listAllPrefabs(recurseDepth=5)]
for (_,m) in self.listAllModules(recurseDepth=0):
prefabs.extend(m.listAllPrefabsIncSubModules())
return prefabs
# ------------------------------------------------------------------------------
def _stringsInList(theList):
# flatten a tree structured list containing strings, or possibly ast nodes
if isinstance(theList, (ast.Tuple,ast.List)):
theList = theList.nodes
elif isinstance(theList, (list,tuple)):
theList = theList
else:
raise TypeError("Not a tuple or list")
found = []
for item in theList:
if isinstance(item,str):
found.append(item)
elif isinstance(item, ast.Name):
found.append(item.name)
elif isinstance(item,(list,tuple,ast.Node)):
found.extend(_stringsInList(item))
return found
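# Illustrative check (not part of the original module): nested lists and
# tuples are flattened, and bare strings collected in order.
assert _stringsInList([["Graphline"], ("Pipeline", ["Carousel"])]) == ["Graphline", "Pipeline", "Carousel"]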
def _parseBoxes(node):
if isinstance(node, ast.Dict):
return _parseDictBoxes(node)
elif isinstance(node, ast.List):
return _parseListBoxes(node)
def _parseDictBoxes(dictNode):
boxes = []
for (lhs,rhs) in dictNode.items:
if isinstance(lhs, ast.Const) and isinstance(rhs, ast.Const):
name = lhs.value
desc = rhs.value
if isinstance(name, str) and isinstance(desc, str):
boxes.append((name,desc))
return dict(boxes)
def _parseListBoxes(listNode):
boxes = []
for item in listNode.nodes:
if isinstance(item, ast.Const):
name = item.value
if isinstance(name, str):
boxes.append((name,''))
return list(boxes)
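# Illustrative sketch (not part of the original module; the _demo_* name and
# the Inboxes snippet are hypothetical): box declarations are parsed straight
# out of the AST of the module being documented.
def _demo_parseBoxes():
    mod = compiler.parse('Inboxes = { "inbox" : "Data in", "control" : "Shutdown" }')
    boxes = _parseBoxes(mod.node.nodes[0].expr)
    assert boxes == {"inbox": "Data in", "control": "Shutdown"}
    return boxes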
# ------------------------------------------------------------------------------
# METHODS PROVIDING
# BACKWARD COMPATIBILITY WITH OLD Repository.py
def GetAllKamaeliaComponentsNested(baseDir=None):
"""\
Return a nested structure of dictionaries. Keys are module names. Values
are either nested sub-dictionaries, or component names. The structure
maps directly to the module directory structure.
If no base-directory is specified, then the current Kamaelia installation
will be scanned.
Keyword arguments:
- baseDir -- Optional. Top directory of the code base to scan, or None for the current Kamaelia installation (default=None)
"""
flatList = GetAllKamaeliaComponents(baseDir)
flatList.sort()
return _nest(flatList)
def GetAllKamaeliaComponents(baseDir=None):
"""\
Return a flat dictionary mapping module paths to lists of component names
contained in that module. Module paths are tuples containing each element
of the path, eg ("Kamaelia","File","Reading")
If no base-directory is specified, then the current Kamaelia installation
will be scanned.
Keyword arguments:
- baseDir -- Optional. Top directory of the code base to scan, or None for the current Kamaelia installation (default=None)
"""
if baseDir is None:
import Kamaelia
baseDir=os.path.dirname(Kamaelia.__file__)
rDocs = ModuleDoc("Kamaelia",baseDir)
names = {}
for name in [name for (name,item) in rDocs.listAllComponentsIncSubModules()]:
path,name = tuple(name.split(".")[:-1]), name.split(".")[-1]
names[path] = names.get(path,[]) + [name]
return names
def GetAllKamaeliaPrefabsNested(baseDir=None):
"""\
Return a nested structure of dictionaries. Keys are module names. Values
are either nested sub-dictionaries, or prefab names. The structure
maps directly to the module directory structure.
If no base-directory is specified, then the current Kamaelia installation
will be scanned.
Keyword arguments:
- baseDir -- Optional. Top directory of the code base to scan, or None for the current Kamaelia installation (default=None)
"""
flatList = GetAllKamaeliaPrefabs(baseDir)
flatList.sort()
return _nest(flatList)
def GetAllKamaeliaPrefabs(baseDir=None):
"""\
Return a flat dictionary mapping module paths to lists of prefab names
contained in that module. Module paths are tuples containing each element
of the path, eg ("Kamaelia","File","Reading")
If no base-directory is specified, then the current Kamaelia installation
will be scanned.
Keyword arguments:
- baseDir -- Optional. Top directory of the code base to scan, or None for the current Kamaelia installation (default=None)
"""
if baseDir is None:
import Kamaelia
baseDir=os.path.dirname(Kamaelia.__file__)
rDocs = ModuleDoc("Kamaelia",baseDir)
names = {}
for name in [name for (name,item) in rDocs.listAllPrefabsIncSubModules()]:
path,name = tuple(name.split(".")[:-1]), name.split(".")[-1]
names[path] = names.get(path,[]) + [name]
return names
def _nest(flatList):
nested={}
for path in flatList:
leafModuleName=path[-2]
componentName=path[-1]
node=nested
for element in path[:-2]:
if element in node:
assert(isinstance(node[element],dict))
else:
node[element]=dict()
node=node[element]
if leafModuleName in node:
assert(isinstance(node[leafModuleName],list))
else:
node[leafModuleName]=list()
node[leafModuleName].append(componentName)
return nested
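# Illustrative check (not part of the original module; the _demo_* name is
# hypothetical): _nest expects a sorted list of path tuples whose last
# element is the component/prefab name.
def _demo_nest():
    flat = [("Kamaelia", "Chassis", "Graphline"),
            ("Kamaelia", "Chassis", "Pipeline"),
            ("Kamaelia", "Util", "Console")]
    nested = _nest(flat)
    assert nested == {"Kamaelia": {"Chassis": ["Graphline", "Pipeline"],
                                   "Util": ["Console"]}}
    return nested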
if __name__ == "__main__":
file="/home/matteh/kamaelia/trunk/Code/Python/Kamaelia/Kamaelia/File/Reading.py"
#file="/home/matteh/kamaelia/trunk/Code/Python/Kamaelia/Kamaelia/Chassis/Pipeline.py"
#file="/home/matteh/kamaelia/trunk/Code/Python/Kamaelia/Kamaelia/Protocol/RTP/NullPayloadRTP.py"
modDocs = ModuleDoc("Kamaelia.File.Reading",file,{})
print "MODULE:"
print modDocs.doc
print
print "PREFABS:"
for (name,item) in modDocs.listAllPrefabs():
print name,item.argString
print
print "COMPONENTS:"
for (name,item) in modDocs.listAllComponents():
print name
print "Inboxes: ",item.inboxes
print "Outboxes: ",item.outboxes
for (name,meth) in item.listAllFunctions():
print name + "(" + meth.argString + ")"
print
import pprint
pprint.pprint(GetAllKamaeliaComponents(),None,4)
print
print "*******************************************************************"
print
pprint.pprint(GetAllKamaeliaComponentsNested(),None,4)
print
print "*******************************************************************"
print
pprint.pprint(GetAllKamaeliaPrefabs(),None,4)
print
print "*******************************************************************"
print
pprint.pprint(GetAllKamaeliaPrefabsNested(),None,4)
print
print "*******************************************************************"
| apache-2.0 |
shinru2004/android_kernel_htc_a11 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
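# A quick illustration (not from the original script; the _demo_* name and
# event/field names are hypothetical): autodict() gives a defaultdict whose
# missing keys grow further autodicts, so deep paths need no setup.
def _demo_autodict():
    d = autodict()
    d['sched']['sched_switch']['values'][0] = "idle"
    assert d['sched']['sched_switch']['values'][0] == "idle"
    return d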
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
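# Illustrative sketch (not from the original script; the event and field
# names are hypothetical): after registering a delimiter and two flag values,
# flag_str() decomposes a bitmask into the matching names.
def _demo_flag_str():
    define_flag_field("irq", "flags", "|")
    define_flag_value("irq", "flags", 1, "SHARED")
    define_flag_value("irq", "flags", 2, "ONESHOT")
    assert flag_str("irq", "flags", 3) == "SHARED | ONESHOT"
    return flag_str("irq", "flags", 3)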
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
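# Illustrative check (not from the original script; the _demo_* name is
# hypothetical): 0x05 sets IRQS_OFF and NEED_RESCHED; the output order
# follows dict iteration, so compare it order-independently.
def _demo_trace_flag_str():
    parts = trace_flag_str(0x05).split(" | ")
    assert sorted(parts) == ["IRQS_OFF", "NEED_RESCHED"]
    return parts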
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
MIPS/external-chromium_org-tools-grit | grit/pseudo.py | 62 | 4072 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Pseudotranslation support. Our pseudotranslations are based on the
P-language, which is a simple vowel-extending language. Examples of P:
- "hello" becomes "hepellopo"
- "howdie" becomes "hopowdiepie"
- "because" becomes "bepecaupause" (but in our implementation we don't
handle the silent e at the end so it actually would return "bepecaupausepe")
The P-language has the excellent quality of increasing the length of text
by around 30-50% which is great for pseudotranslations, to stress test any
GUI layouts etc.
To make the pseudotranslations more obviously "not a translation" and to make
them exercise any code that deals with encodings, we also transform all English
vowels into equivalent vowels with diacriticals on them (rings, acutes,
diaresis, and circumflex), and we write the "p" in the P-language as a Hebrew
character Qof. It looks sort of like a latin character "p" but it is outside
the latin-1 character set which will stress character encoding bugs.
'''
from grit import lazy_re
from grit import tclib
# An RFC language code for the P pseudolanguage.
PSEUDO_LANG = 'x-P-pseudo'
# Hebrew character Qof. It looks kind of like a 'p' but is outside
# the latin-1 character set which is good for our purposes.
# TODO(joi) For now using P instead of Qof, because of some bugs it triggered. Find
# a better solution, i.e. one that introduces a non-latin1 character into the
# pseudotranslation.
#_QOF = u'\u05e7'
_QOF = u'P'
# How we map each vowel.
_VOWELS = {
u'a' : u'\u00e5', # a with ring
u'e' : u'\u00e9', # e acute
u'i' : u'\u00ef', # i diaresis
u'o' : u'\u00f4', # o circumflex
u'u' : u'\u00fc', # u diaresis
u'y' : u'\u00fd', # y acute
u'A' : u'\u00c5', # A with ring
u'E' : u'\u00c9', # E acute
u'I' : u'\u00cf', # I diaresis
u'O' : u'\u00d4', # O circumflex
u'U' : u'\u00dc', # U diaresis
u'Y' : u'\u00dd', # Y acute
}
# Matches vowels and P
_PSUB_RE = lazy_re.compile("(%s)" % '|'.join(_VOWELS.keys() + ['P']))
# Pseudotranslations previously created. This is important for performance
# reasons, especially since we routinely pseudotranslate the whole project
# several or many different times for each build.
_existing_translations = {}
def MapVowels(str, also_p = False):
'''Returns a copy of 'str' where characters that exist as keys in _VOWELS
have been replaced with the corresponding value. If also_p is true, this
function will also change capital P characters into a Hebrew character Qof.
'''
def Repl(match):
if match.group() == 'P':
if also_p:
return _QOF
else:
return 'P'
else:
return _VOWELS[match.group()]
return _PSUB_RE.sub(Repl, str)
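# Illustrative check (not part of the original module): vowels gain
# diacriticals while consonants pass through untouched.
assert MapVowels(u"piano") == u"p\u00ef\u00e5n\u00f4"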
def PseudoString(str):
'''Returns a pseudotranslation of the provided string, in our enhanced
P-language.'''
if str in _existing_translations:
return _existing_translations[str]
outstr = u''
ix = 0
while ix < len(str):
if str[ix] not in _VOWELS.keys():
outstr += str[ix]
ix += 1
else:
# We want to treat consecutive vowels as one composite vowel. This is not
# always accurate e.g. in composite words but good enough.
consecutive_vowels = u''
while ix < len(str) and str[ix] in _VOWELS.keys():
consecutive_vowels += str[ix]
ix += 1
changed_vowels = MapVowels(consecutive_vowels)
outstr += changed_vowels
outstr += _QOF
outstr += changed_vowels
_existing_translations[str] = outstr
return outstr
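# Illustrative check (not part of the original module): "hello" becomes
# "hepellopo" in P, with accented vowels and the QOF stand-in (currently "P").
assert PseudoString(u"hello") == u"h\u00e9P\u00e9ll\u00f4P\u00f4"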
def PseudoMessage(message):
'''Returns a pseudotranslation of the provided message.
Args:
message: tclib.Message()
Return:
tclib.Translation()
'''
transl = tclib.Translation()
for part in message.GetContent():
if isinstance(part, tclib.Placeholder):
transl.AppendPlaceholder(part)
else:
transl.AppendText(PseudoString(part))
return transl
| bsd-2-clause |
Venris/crazyflie-multilink | lib/cflib/crazyflie/toc.py | 1 | 7081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
A generic TableOfContents module that is used to fetch, store and manipulate
a TOC for logging or parameters.
"""
__author__ = 'Bitcraze AB'
__all__ = ['TocElement', 'Toc', 'TocFetcher']
from cflib.crtp.crtpstack import CRTPPacket
import struct
import logging
logger = logging.getLogger(__name__)
TOC_CHANNEL = 0
# Commands used when accessing the Table of Contents
CMD_TOC_ELEMENT = 0
CMD_TOC_INFO = 1
# Possible states when receiving TOC
IDLE = "IDLE"
GET_TOC_INFO = "GET_TOC_INFO"
GET_TOC_ELEMENT = "GET_TOC_ELEMENT"
class TocElement:
"""An element in the TOC."""
RW_ACCESS = 0
RO_ACCESS = 1
def __init__(self):
self.ident = 0
self.group = ""
self.name = ""
self.ctype = ""
self.pytype = ""
self.access = TocElement.RO_ACCESS
class Toc:
"""Container for TocElements."""
def __init__(self):
self.toc = {}
def clear(self):
"""Clear the TOC"""
self.toc = {}
def add_element(self, element):
"""Add a new TocElement to the TOC container."""
try:
self.toc[element.group][element.name] = element
except KeyError:
self.toc[element.group] = {}
self.toc[element.group][element.name] = element
def get_element_by_complete_name(self, complete_name):
"""Get a TocElement element identified by complete name from the
container."""
try:
return self.get_element_by_id(self.get_element_id(complete_name))
except ValueError:
# Item not found
return None
def get_element_id(self, complete_name):
"""Get the TocElement element id-number of the element with the
supplied name."""
[group, name] = complete_name.split(".")
element = self.get_element(group, name)
if element:
return element.ident
else:
logger.warning("Unable to find variable [%s]", complete_name)
return None
def get_element(self, group, name):
"""Get a TocElement element identified by name and group from the
container."""
try:
return self.toc[group][name]
except KeyError:
return None
def get_element_by_id(self, ident):
"""Get a TocElement element identified by index number from the
container."""
for group in self.toc.keys():
for name in self.toc[group].keys():
if self.toc[group][name].ident == ident:
return self.toc[group][name]
return None
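# Illustrative sketch (not part of the original module; the _demo_* name and
# the group/name values are hypothetical): a Toc is keyed by group then name,
# and entries can be looked up as "group.name" or by ident.
def _demo_toc():
    element = TocElement()
    element.ident = 7
    element.group = "pm"
    element.name = "vbat"
    toc = Toc()
    toc.add_element(element)
    assert toc.get_element_id("pm.vbat") == 7
    assert toc.get_element_by_id(7) is element
    return toc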
class TocFetcher:
"""Fetches TOC entries from the Crazyflie"""
def __init__(self, crazyflie, element_class, port, toc_holder,
finished_callback, toc_cache):
self.cf = crazyflie
self.port = port
self._crc = 0
self.requested_index = None
self.nbr_of_items = None
self.state = None
self.toc = toc_holder
self._toc_cache = toc_cache
self.finished_callback = finished_callback
self.element_class = element_class
def start(self):
"""Initiate fetching of the TOC."""
logger.debug("[%d]: Start fetching...", self.port)
# Register callback in this class for the port
self.cf.add_port_callback(self.port, self._new_packet_cb)
# Request the TOC CRC
self.state = GET_TOC_INFO
pk = CRTPPacket()
pk.set_header(self.port, TOC_CHANNEL)
pk.data = (CMD_TOC_INFO, )
self.cf.send_packet(pk, expected_reply=(CMD_TOC_INFO,))
def _toc_fetch_finished(self):
"""Callback for when the TOC fetching is finished"""
self.cf.remove_port_callback(self.port, self._new_packet_cb)
logger.debug("[%d]: Done!", self.port)
self.finished_callback()
def _new_packet_cb(self, packet):
"""Handle a newly arrived packet"""
chan = packet.channel
if (chan != 0):
return
payload = struct.pack("B" * (len(packet.datal) - 1), *packet.datal[1:])
if (self.state == GET_TOC_INFO):
[self.nbr_of_items, self._crc] = struct.unpack("<BI", payload[:5])
logger.debug("[%d]: Got TOC CRC, %d items and crc=0x%08X",
self.port, self.nbr_of_items, self._crc)
cache_data = self._toc_cache.fetch(self._crc)
if (cache_data):
self.toc.toc = cache_data
logger.info("TOC for port [%s] found in cache" % self.port)
self._toc_fetch_finished()
else:
self.state = GET_TOC_ELEMENT
self.requested_index = 0
self._request_toc_element(self.requested_index)
elif (self.state == GET_TOC_ELEMENT):
# Always add new element, but only request new if it's not the
# last one.
if self.requested_index != ord(payload[0]):
return
self.toc.add_element(self.element_class(payload))
logger.debug("Added element [%s]",
self.element_class(payload).ident)
if (self.requested_index < (self.nbr_of_items - 1)):
logger.debug("[%d]: More variables, requesting index %d",
self.port, self.requested_index + 1)
self.requested_index = self.requested_index + 1
self._request_toc_element(self.requested_index)
else: # No more variables in TOC
self._toc_cache.insert(self._crc, self.toc.toc)
self._toc_fetch_finished()
def _request_toc_element(self, index):
"""Request information about a specific item in the TOC"""
logger.debug("Requesting index %d on port %d", index, self.port)
pk = CRTPPacket()
pk.set_header(self.port, TOC_CHANNEL)
pk.data = (CMD_TOC_ELEMENT, index)
self.cf.send_packet(pk, expected_reply=(CMD_TOC_ELEMENT, index))
| gpl-2.0 |
yaccz/xhtml2pdf | xhtml2pdf/tags.py | 13 | 19943 | # -*- coding: utf-8 -*-
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch, mm
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import Spacer, HRFlowable, PageBreak, Flowable
from reportlab.platypus.frames import Frame
from reportlab.platypus.paraparser import tt2ps, ABag
from xhtml2pdf import xhtml2pdf_reportlab
from xhtml2pdf.util import getColor, getSize, getAlign, dpi96
from xhtml2pdf.xhtml2pdf_reportlab import PmlImage, PmlPageTemplate
import copy
import logging
import re
import warnings
import string
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = logging.getLogger("xhtml2pdf")
def deprecation(message):
warnings.warn("<" + message + "> is deprecated!", DeprecationWarning, stacklevel=2)
class pisaTag:
"""
The default class for a tag definition
"""
def __init__(self, node, attr):
self.node = node
self.tag = node.tagName
self.attr = attr
def start(self, c):
pass
def end(self, c):
pass
class pisaTagBODY(pisaTag):
"""
We can also assume that there is a BODY tag because html5lib
adds it for us. Here we take the base font size for later calculations
in the FONT tag.
"""
def start(self, c):
c.baseFontSize = c.frag.fontSize
# print "base font size", c.baseFontSize
class pisaTagTITLE(pisaTag):
def end(self, c):
c.meta["title"] = c.text
c.clearFrag()
class pisaTagSTYLE(pisaTag):
def start(self, c):
c.addPara()
def end(self, c):
c.clearFrag()
class pisaTagMETA(pisaTag):
def start(self, c):
name = self.attr.name.lower()
if name in ("author", "subject", "keywords"):
c.meta[name] = self.attr.content
class pisaTagSUP(pisaTag):
def start(self, c):
c.frag.super = 1
class pisaTagSUB(pisaTag):
def start(self, c):
c.frag.sub = 1
class pisaTagA(pisaTag):
rxLink = re.compile("^(#|[a-z]+\:).*")
def start(self, c):
attr = self.attr
# XXX Also support attr.id ?
if attr.name:
# Important! Make sure that cbDefn is not inherited by other
# fragments because of a bug in Reportlab!
afrag = c.frag.clone()
# These 3 lines are needed to fix an error with non internal fonts
afrag.fontName = "Helvetica"
afrag.bold = 0
afrag.italic = 0
afrag.cbDefn = ABag(
kind="anchor",
name=attr.name,
label="anchor")
c.fragAnchor.append(afrag)
c.anchorName.append(attr.name)
if attr.href and self.rxLink.match(attr.href):
c.frag.link = attr.href
def end(self, c):
pass
class pisaTagFONT(pisaTag):
# Source: http://www.w3.org/TR/CSS21/fonts.html#propdef-font-size
def start(self, c):
if self.attr["color"] is not None:
c.frag.textColor = getColor(self.attr["color"])
if self.attr["face"] is not None:
c.frag.fontName = c.getFontName(self.attr["face"])
if self.attr["size"] is not None:
size = getSize(self.attr["size"], c.frag.fontSize, c.baseFontSize)
c.frag.fontSize = max(size, 1.0)
def end(self, c):
pass
class pisaTagP(pisaTag):
def start(self, c):
# save the type of tag; it's used in PmlBaseDoc.afterFlowable()
# to check if we need to add an outline-entry
# c.frag.tag = self.tag
if self.attr.align is not None:
c.frag.alignment = getAlign(self.attr.align)
class pisaTagDIV(pisaTagP):
pass
class pisaTagH1(pisaTagP):
pass
class pisaTagH2(pisaTagP):
pass
class pisaTagH3(pisaTagP):
pass
class pisaTagH4(pisaTagP):
pass
class pisaTagH5(pisaTagP):
pass
class pisaTagH6(pisaTagP):
pass
def listDecimal(c):
c.listCounter += 1
return unicode("%d." % c.listCounter)
roman_numeral_map = zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
)
def int_to_roman(i):
result = []
for integer, numeral in roman_numeral_map:
count = int(i / integer)
result.append(numeral * count)
i -= integer * count
return ''.join(result)
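# Illustrative check (not in the original module): the greedy subtraction
# over roman_numeral_map handles subtractive pairs such as CM and IV.
assert int_to_roman(1994) == "MCMXCIV"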
def listUpperRoman(c):
c.listCounter += 1
roman = int_to_roman(c.listCounter)
return unicode("%s." % roman)
def listLowerRoman(c):
return listUpperRoman(c).lower()
def listUpperAlpha(c):
c.listCounter += 1
index = c.listCounter - 1
try:
alpha = string.ascii_uppercase[index]
except IndexError:
# needs to start over and double the character
# this will probably fail for anything past the 2nd time
alpha = string.ascii_uppercase[index - 26]
alpha *= 2
return unicode("%s." % alpha)
def listLowerAlpha(c):
return listUpperAlpha(c).lower()
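# Illustrative sketch (not part of the original module; the _demo_* name is
# hypothetical): the list counters above only need an object with a mutable
# listCounter attribute, so an ABag stands in for the rendering context here.
def _demo_listCounters():
    c = ABag(listCounter=0)
    assert listDecimal(c) == u"1."
    assert listUpperRoman(c) == u"II."
    c.listCounter = 3
    assert listUpperAlpha(c) == u"D."
    return c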
_bullet = u"\u2022"
_list_style_type = {
"none": u"",
"disc": _bullet,
"circle": _bullet, # XXX PDF has no equivalent
"square": _bullet, # XXX PDF has no equivalent
"decimal": listDecimal,
"decimal-leading-zero": listDecimal,
"lower-roman": listLowerRoman,
"upper-roman": listUpperRoman,
"hebrew": listDecimal,
"georgian": listDecimal,
"armenian": listDecimal,
"cjk-ideographic": listDecimal,
"hiragana": listDecimal,
"katakana": listDecimal,
"hiragana-iroha": listDecimal,
"katakana-iroha": listDecimal,
"lower-latin": listDecimal,
"lower-alpha": listLowerAlpha,
"upper-latin": listDecimal,
"upper-alpha": listUpperAlpha,
"lower-greek": listDecimal,
}
class pisaTagUL(pisaTagP):
def start(self, c):
self.counter, c.listCounter = c.listCounter, 0
def end(self, c):
c.addPara()
# XXX Simulate margin for the moment
c.addStory(Spacer(width=1, height=c.fragBlock.spaceAfter))
c.listCounter = self.counter
class pisaTagOL(pisaTagUL):
pass
class pisaTagLI(pisaTag):
def start(self, c):
lst = _list_style_type.get(c.frag.listStyleType or "disc", _bullet)
frag = copy.copy(c.frag)
self.offset = 0
if frag.listStyleImage is not None:
frag.text = u""
f = frag.listStyleImage
if f and (not f.notFound()):
img = PmlImage(
f.getData(),
width=None,
height=None)
img.drawHeight *= dpi96
img.drawWidth *= dpi96
img.pisaZoom = frag.zoom
img.drawWidth *= img.pisaZoom
img.drawHeight *= img.pisaZoom
frag.image = img
self.offset = max(0, img.drawHeight - c.frag.fontSize)
else:
if type(lst) == type(u""):
frag.text = lst
else:
# XXX This should be the recent font, but it throws errors in Reportlab!
frag.text = lst(c)
# XXX This should usually be done in the context!!!
frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
c.frag.bulletText = [frag]
def end(self, c):
c.fragBlock.spaceBefore += self.offset
class pisaTagBR(pisaTag):
def start(self, c):
c.frag.lineBreak = 1
c.addFrag()
c.fragStrip = True
del c.frag.lineBreak
c.force = True
class pisaTagIMG(pisaTag):
def start(self, c):
attr = self.attr
if attr.src and (not attr.src.notFound()):
try:
align = attr.align or c.frag.vAlign or "baseline"
width = c.frag.width
height = c.frag.height
if attr.width:
width = attr.width * dpi96
if attr.height:
height = attr.height * dpi96
img = PmlImage(
attr.src.getData(),
width=None,
height=None)
img.pisaZoom = c.frag.zoom
img.drawHeight *= dpi96
img.drawWidth *= dpi96
if (width is None) and (height is not None):
factor = getSize(height) / img.drawHeight
img.drawWidth *= factor
img.drawHeight = getSize(height)
elif (height is None) and (width is not None):
factor = getSize(width) / img.drawWidth
img.drawHeight *= factor
img.drawWidth = getSize(width)
elif (width is not None) and (height is not None):
img.drawWidth = getSize(width)
img.drawHeight = getSize(height)
img.drawWidth *= img.pisaZoom
img.drawHeight *= img.pisaZoom
img.spaceBefore = c.frag.spaceBefore
img.spaceAfter = c.frag.spaceAfter
# print "image", id(img), img.drawWidth, img.drawHeight
'''
TODO:
- Apply styles
- vspace etc.
- Borders
- Test inside tables
'''
c.force = True
if align in ["left", "right"]:
c.image = img
c.imageData = dict(
align=align
)
else:
# Important! Make sure that cbDefn is not inherited by other
# fragments because of a bug in Reportlab!
# afrag = c.frag.clone()
valign = align
if valign in ["texttop"]:
valign = "top"
elif valign in ["absmiddle"]:
valign = "middle"
elif valign in ["absbottom", "baseline"]:
valign = "bottom"
afrag = c.frag.clone()
afrag.text = ""
afrag.fontName = "Helvetica" # Fix for a nasty bug!!!
afrag.cbDefn = ABag(
kind="img",
image=img, # .getImage(), # XXX Inline?
valign=valign,
fontName="Helvetica",
fontSize=img.drawHeight,
width=img.drawWidth,
height=img.drawHeight)
c.fragList.append(afrag)
c.fontSize = img.drawHeight
except Exception: # TODO: Kill catch-all
log.warn(c.warning("Error in handling image"), exc_info=1)
else:
log.warn(c.warning("Need a valid file name!"))
class pisaTagHR(pisaTag):
def start(self, c):
c.addPara()
c.addStory(HRFlowable(
color=self.attr.color,
thickness=self.attr.size,
width=self.attr.get('width', "100%") or "100%",
spaceBefore=c.frag.spaceBefore,
spaceAfter=c.frag.spaceAfter
))
# --- Forms
if 0:
class pisaTagINPUT(pisaTag):
def _render(self, c, attr):
width = 10
height = 10
if attr.type == "text":
width = 100
height = 12
c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
type=attr.type,
default=attr.value,
width=width,
height=height,
))
def end(self, c):
c.addPara()
attr = self.attr
if attr.name:
self._render(c, attr)
c.addPara()
class pisaTagTEXTAREA(pisaTagINPUT):
def _render(self, c, attr):
c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
default="",
width=100,
height=100))
class pisaTagSELECT(pisaTagINPUT):
def start(self, c):
c.select_options = ["One", "Two", "Three"]
def _render(self, c, attr):
c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
type="select",
default=c.select_options[0],
options=c.select_options,
width=100,
height=40))
c.select_options = None
class pisaTagOPTION(pisaTag):
pass
class pisaTagPDFNEXTPAGE(pisaTag):
"""
<pdf:nextpage name="" />
"""
def start(self, c):
# deprecation("pdf:nextpage")
c.addPara()
if self.attr.name:
c.addStory(NextPageTemplate(self.attr.name))
c.addStory(PageBreak())
class pisaTagPDFNEXTTEMPLATE(pisaTag):
"""
<pdf:nexttemplate name="" />
"""
def start(self, c):
# deprecation("pdf:frame")
c.addStory(NextPageTemplate(self.attr["name"]))
class pisaTagPDFNEXTFRAME(pisaTag):
"""
<pdf:nextframe name="" />
"""
def start(self, c):
c.addPara()
c.addStory(FrameBreak())
class pisaTagPDFSPACER(pisaTag):
"""
<pdf:spacer height="" />
"""
def start(self, c):
c.addPara()
c.addStory(Spacer(1, self.attr.height))
class pisaTagPDFPAGENUMBER(pisaTag):
"""
<pdf:pagenumber example="" />
"""
def start(self, c):
c.frag.pageNumber = True
c.addFrag(self.attr.example)
c.frag.pageNumber = False
class pisaTagPDFPAGECOUNT(pisaTag):
"""
<pdf:pagecount />
"""
def start(self, c):
c.frag.pageCount = True
c.addFrag()
c.frag.pageCount = False
def end(self, c):
c.addPageCount()
class pisaTagPDFTOC(pisaTag):
"""
<pdf:toc />
"""
def end(self, c):
c.multiBuild = True
c.addTOC()
class pisaTagPDFFRAME(pisaTag):
"""
<pdf:frame name="" static box="" />
"""
def start(self, c):
deprecation("pdf:frame")
attrs = self.attr
name = attrs["name"]
if name is None:
name = "frame%d" % c.UID()
x, y, w, h = attrs.box
self.frame = Frame(
x, y, w, h,
id=name,
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0,
showBoundary=attrs.border)
self.static = False
if self.attr.static:
self.static = True
c.addPara()
self.story = c.swapStory()
else:
c.frameList.append(self.frame)
def end(self, c):
if self.static:
c.addPara()
self.frame.pisaStaticStory = c.story
c.frameStaticList.append(self.frame)
c.swapStory(self.story)
class pisaTagPDFTEMPLATE(pisaTag):
"""
<pdf:template name="" static box="" >
<pdf:frame...>
</pdf:template>
"""
def start(self, c):
deprecation("pdf:template")
attrs = self.attr
#print attrs
name = attrs["name"]
c.frameList = []
c.frameStaticList = []
if name in c.templateList:
log.warn(c.warning("template '%s' has already been defined", name))
def end(self, c):
attrs = self.attr
name = attrs["name"]
if len(c.frameList) <= 0:
log.warn(c.warning("missing frame definitions for template"))
pt = PmlPageTemplate(
id=name,
frames=c.frameList,
pagesize=A4,
)
pt.pisaStaticList = c.frameStaticList
pt.pisaBackgroundList = c.pisaBackgroundList
pt.pisaBackground = self.attr.background
c.templateList[name] = pt
c.template = None
c.frameList = []
c.frameStaticList = []
class pisaTagPDFFONT(pisaTag):
"""
<pdf:fontembed name="" src="" />
"""
def start(self, c):
deprecation("pdf:font")
c.loadFont(self.attr.name, self.attr.src, self.attr.encoding)
class pisaTagPDFBARCODE(pisaTag):
_codeName = {
"I2OF5": "I2of5",
"ITF": "I2of5",
"CODE39": "Standard39",
"EXTENDEDCODE39": "Extended39",
"CODE93": "Standard93",
"EXTENDEDCODE93": "Extended93",
"MSI": "MSI",
"CODABAR": "Codabar",
"NW7": "Codabar",
"CODE11": "Code11",
"FIM": "FIM",
"POSTNET": "POSTNET",
"USPS4S": "USPS_4State",
"CODE128": "Code128",
"EAN13": "EAN13",
"EAN8": "EAN8",
"QR": "QR",
}
class _barcodeWrapper(Flowable):
"""
Wrapper for barcode widget
"""
def __init__(self, codeName="Code128", value="", **kw):
self.widget = createBarcodeDrawing(codeName, value=value, **kw)
def draw(self, canvas, xoffset=0, **kw):
# NOTE: `canvas' is mutable, so canvas.restoreState() is a MUST.
canvas.saveState()
canvas.translate(xoffset, 0)
self.widget.canv = canvas
self.widget.draw()
canvas.restoreState()
def wrap(self, aW, aH):
return self.widget.wrap(aW, aH)
def start(self, c):
attr = self.attr
codeName = attr.type or "Code128"
codeName = pisaTagPDFBARCODE._codeName[codeName.upper().replace("-", "")]
humanReadable = bool(attr.humanreadable)
barWidth = attr.barwidth or 0.01 * inch
barHeight = attr.barheight or 0.5 * inch
fontName = c.getFontName("OCRB10,OCR-B,OCR B,OCRB") # or "Helvetica"
fontSize = attr.fontsize or 2.75 * mm
# Assure minimal size.
if codeName in ("EAN13", "EAN8"):
barWidth = max(barWidth, 0.264 * mm)
fontSize = max(fontSize, 2.75 * mm)
else: # Code39 etc.
barWidth = max(barWidth, 0.0075 * inch)
barcode = pisaTagPDFBARCODE._barcodeWrapper(
codeName=codeName,
value=attr.value,
barWidth=barWidth,
barHeight=barHeight,
humanReadable=humanReadable,
fontName=fontName,
fontSize=fontSize,
)
width, height = barcode.wrap(c.frag.width, c.frag.height)
c.force = True
valign = attr.align or c.frag.vAlign or "baseline"
if valign in ["texttop"]:
valign = "top"
elif valign in ["absmiddle"]:
valign = "middle"
elif valign in ["absbottom", "baseline"]:
valign = "bottom"
afrag = c.frag.clone()
afrag.text = ""
afrag.fontName = fontName
afrag.cbDefn = ABag(
kind="barcode",
barcode=barcode,
width=width,
height=height,
valign=valign,
)
c.fragList.append(afrag)
| apache-2.0 |
dya2/python-for-android | python3-alpha/python3-src/Lib/test/datetimetester.py | 47 | 146989 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
import sys
import pickle
import unittest
from operator import lt, le, gt, ge, eq, ne, truediv, floordiv, mod
from test import support
import datetime as datetime_module
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import timezone
from datetime import date, datetime
import time as _time
# Needed by test_datetime
import _strptime
#
pickle_choices = [(pickle, pickle, proto)
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
assert len(pickle_choices) == pickle.HIGHEST_PROTOCOL + 1
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 34.5, "abc", {}, [], ())
# XXX Copied from test_float.
INF = float("inf")
NAN = float("nan")
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
datetime = datetime_module
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
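# Quick illustrative checks (not part of the original tests; the _demo_*
# name is hypothetical): FixedOffset accepts plain minutes and hands them
# back as timedeltas.
_demo_offset = FixedOffset(-300, "EST", 60)
assert _demo_offset.utcoffset(None) == timedelta(minutes=-300)
assert _demo_offset.dst(None) == timedelta(minutes=60)
assert _demo_offset.tzname(None) == "EST"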
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.assertTrue(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.assertIsInstance(ne, tzinfo)
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.assertIsInstance(fo, tzinfo)
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway, else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.assertTrue(type(orig) is tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertTrue(type(derived) is tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
for otype, args in [
(PicklableFixedOffset, (offset, 'cookie')),
(timezone, (offset,)),
(timezone, (offset, "EST"))]:
orig = otype(*args)
oname = orig.tzname(None)
self.assertIsInstance(orig, tzinfo)
self.assertIs(type(orig), otype)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), oname)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIsInstance(derived, tzinfo)
self.assertIs(type(derived), otype)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), oname)
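# Editor's sketch: the concrete timezone class exercised next synthesizes a
# name from its offset unless one is given explicitly:
def _demo_timezone_names():
    anon = timezone(timedelta(hours=-5))
    named = timezone(timedelta(hours=-5), 'EST')
    return anon.tzname(None), named.tzname(None)    # ('UTC-05:00', 'EST')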
class TestTimeZone(unittest.TestCase):
def setUp(self):
self.ACDT = timezone(timedelta(hours=9.5), 'ACDT')
self.EST = timezone(-timedelta(hours=5), 'EST')
self.DT = datetime(2010, 1, 1)
def test_str(self):
for tz in [self.ACDT, self.EST, timezone.utc,
timezone.min, timezone.max]:
self.assertEqual(str(tz), tz.tzname(None))
def test_repr(self):
datetime = datetime_module
for tz in [self.ACDT, self.EST, timezone.utc,
timezone.min, timezone.max]:
# test round-trip
tzrep = repr(tz)
self.assertEqual(tz, eval(tzrep))
def test_class_members(self):
limit = timedelta(hours=23, minutes=59)
self.assertEqual(timezone.utc.utcoffset(None), ZERO)
self.assertEqual(timezone.min.utcoffset(None), -limit)
self.assertEqual(timezone.max.utcoffset(None), limit)
def test_constructor(self):
self.assertIs(timezone.utc, timezone(timedelta(0)))
self.assertIsNot(timezone.utc, timezone(timedelta(0), 'UTC'))
self.assertEqual(timezone.utc, timezone(timedelta(0), 'UTC'))
# invalid offsets
for invalid in [timedelta(microseconds=1), timedelta(1, 1),
timedelta(seconds=1), timedelta(1), -timedelta(1)]:
self.assertRaises(ValueError, timezone, invalid)
self.assertRaises(ValueError, timezone, -invalid)
with self.assertRaises(TypeError): timezone(None)
with self.assertRaises(TypeError): timezone(42)
with self.assertRaises(TypeError): timezone(ZERO, None)
with self.assertRaises(TypeError): timezone(ZERO, 42)
with self.assertRaises(TypeError): timezone(ZERO, 'ABC', 'extra')
def test_inheritance(self):
self.assertIsInstance(timezone.utc, tzinfo)
self.assertIsInstance(self.EST, tzinfo)
def test_utcoffset(self):
dummy = self.DT
for h in [0, 1.5, 12]:
offset = h * HOUR
self.assertEqual(offset, timezone(offset).utcoffset(dummy))
self.assertEqual(-offset, timezone(-offset).utcoffset(dummy))
with self.assertRaises(TypeError): self.EST.utcoffset('')
with self.assertRaises(TypeError): self.EST.utcoffset(5)
def test_dst(self):
self.assertIsNone(timezone.utc.dst(self.DT))
with self.assertRaises(TypeError): self.EST.dst('')
with self.assertRaises(TypeError): self.EST.dst(5)
def test_tzname(self):
self.assertEqual('UTC+00:00', timezone(ZERO).tzname(None))
self.assertEqual('UTC-05:00', timezone(-5 * HOUR).tzname(None))
self.assertEqual('UTC+09:30', timezone(9.5 * HOUR).tzname(None))
self.assertEqual('UTC-00:01', timezone(timedelta(minutes=-1)).tzname(None))
self.assertEqual('XYZ', timezone(-5 * HOUR, 'XYZ').tzname(None))
with self.assertRaises(TypeError): self.EST.tzname('')
with self.assertRaises(TypeError): self.EST.tzname(5)
def test_fromutc(self):
with self.assertRaises(ValueError):
timezone.utc.fromutc(self.DT)
with self.assertRaises(TypeError):
timezone.utc.fromutc('not datetime')
for tz in [self.EST, self.ACDT, Eastern]:
utctime = self.DT.replace(tzinfo=tz)
local = tz.fromutc(utctime)
self.assertEqual(local - utctime, tz.utcoffset(local))
self.assertEqual(local,
self.DT.replace(tzinfo=timezone.utc))
def test_comparison(self):
self.assertNotEqual(timezone(ZERO), timezone(HOUR))
self.assertEqual(timezone(HOUR), timezone(HOUR))
self.assertEqual(timezone(-5 * HOUR), timezone(-5 * HOUR, 'EST'))
with self.assertRaises(TypeError): timezone(ZERO) < timezone(ZERO)
self.assertIn(timezone(ZERO), {timezone(ZERO)})
def test_aware_datetime(self):
# test that timezone instances can be used by datetime
t = datetime(1, 1, 1)
for tz in [timezone.min, timezone.max, timezone.utc]:
self.assertEqual(tz.tzname(t),
t.replace(tzinfo=tz).tzname())
self.assertEqual(tz.utcoffset(t),
t.replace(tzinfo=tz).utcoffset())
self.assertEqual(tz.dst(t),
t.replace(tzinfo=tz).dst())
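# Editor's sketch: for a fixed-offset zone, fromutc() simply shifts the wall
# time by the zone's own offset, which is the identity test_fromutc checks:
def _demo_fromutc():
    tz = timezone(timedelta(hours=-5))
    u = datetime(2010, 1, 1, tzinfo=tz)   # treated as UTC wall time
    return tz.fromutc(u) - u == tz.utcoffset(None)   # True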
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor call.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertFalse(me == ())
self.assertTrue(me != ())
self.assertFalse(() == me)
self.assertTrue(() != me)
self.assertIn(me, [1, 20, [], me])
self.assertIn([], [me, 1, 20, []])
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
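# Editor's sketch of the contract the mixin pins down: cross-type equality
# quietly answers False/True, while cross-type ordering raises TypeError:
def _demo_mixed_comparison():
    d = date(2000, 1, 1)
    assert (d == ()) is False and (d != ()) is True
    try:
        d < ()
    except TypeError:
        return True    # ordering across unrelated types is refused
    return False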
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(b.__rsub__(a), td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
eq(a/0.5, td(14))
eq(b/0.5, td(0, 120))
eq(a/7, td(1))
eq(b/10, td(0, 6))
eq(c/1000, td(0, 0, 1))
eq(a/10, td(0, 7*24*360))
eq(a/3600000, td(0, 0, 7*24*1000))
# Multiplication by float
us = td(microseconds=1)
eq((3*us) * 0.5, 2*us)
eq((5*us) * 0.5, 2*us)
eq(0.5 * (3*us), 2*us)
eq(0.5 * (5*us), 2*us)
eq((-3*us) * 0.5, -2*us)
eq((-5*us) * 0.5, -2*us)
# Division by int and float
eq((3*us) / 2, 2*us)
eq((5*us) / 2, 2*us)
eq((-3*us) / 2.0, -2*us)
eq((-5*us) / 2.0, -2*us)
eq((3*us) / -2, -2*us)
eq((5*us) / -2, -2*us)
eq((3*us) / -2.0, -2*us)
eq((5*us) / -2.0, -2*us)
for i in range(-10, 10):
eq((i*us/3)//us, round(i/3))
for i in range(-10, 10):
eq((i*us/-3)//us, round(i/-3))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
zero = 0
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
self.assertRaises(ZeroDivisionError, lambda: a / zero)
self.assertRaises(ZeroDivisionError, lambda: a / 0.0)
self.assertRaises(TypeError, lambda: a / '')
@support.requires_IEEE_754
def test_disallowed_special(self):
a = timedelta(42)
self.assertRaises(ValueError, a.__mul__, NAN)
self.assertRaises(ValueError, a.__truediv__, NAN)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue #8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(), td / timedelta(seconds=1))
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1)),
"%s(1)" % name)
self.assertEqual(repr(self.theclass(10, 2)),
"%s(10, 2)" % name)
self.assertEqual(repr(self.theclass(-10, 2, 400000)),
"%s(-10, 2, 400000)" % name)
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=-999999999, seconds=1),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
day = timedelta(1)
self.assertRaises(OverflowError, day.__mul__, 10**9)
self.assertRaises(OverflowError, day.__mul__, 1e9)
self.assertRaises(OverflowError, day.__truediv__, 1e-20)
self.assertRaises(OverflowError, day.__truediv__, 1e-10)
self.assertRaises(OverflowError, day.__truediv__, 9e-10)
@support.requires_IEEE_754
def _test_overflow_special(self):
day = timedelta(1)
self.assertRaises(OverflowError, day.__mul__, INF)
self.assertRaises(OverflowError, day.__mul__, -INF)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertTrue(not timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assertTrue(type(t1) is T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assertTrue(type(t2) is T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assertTrue(type(t3) is timedelta)
t4 = T.from_td(t3)
self.assertTrue(type(t4) is T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
def test_division(self):
t = timedelta(hours=1, minutes=24, seconds=19)
second = timedelta(seconds=1)
self.assertEqual(t / second, 5059.0)
self.assertEqual(t // second, 5059)
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
self.assertEqual(t / minute, 2.5)
self.assertEqual(t // minute, 2)
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, truediv, t, zerotd)
self.assertRaises(ZeroDivisionError, floordiv, t, zerotd)
# self.assertRaises(TypeError, truediv, t, 2)
# note: floor division of a timedelta by an integer *is*
# currently permitted.
def test_remainder(self):
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
r = t % minute
self.assertEqual(r, timedelta(seconds=30))
t = timedelta(minutes=-2, seconds=30)
r = t % minute
self.assertEqual(r, timedelta(seconds=30))
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, mod, t, zerotd)
self.assertRaises(TypeError, mod, t, 10)
def test_divmod(self):
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
q, r = divmod(t, minute)
self.assertEqual(q, 2)
self.assertEqual(r, timedelta(seconds=30))
t = timedelta(minutes=-2, seconds=30)
q, r = divmod(t, minute)
self.assertEqual(q, -2)
self.assertEqual(r, timedelta(seconds=30))
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, divmod, t, zerotd)
self.assertRaises(TypeError, divmod, t, 10)
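# Editor's sketch: timedelta normalizes its three fields so that
# 0 <= microseconds < 10**6 and 0 <= seconds < 24*3600, borrowing from days
# as needed -- the invariant behind test_carries and
# test_massive_normalization above:
def _demo_timedelta_normalization():
    td = timedelta(microseconds=-1)
    return (td.days, td.seconds, td.microseconds)   # (-1, 86399, 999999)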
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
class SubclassDate(date):
sub_var = 1
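# Editor's sketch: proleptic-Gregorian ordinals count days from
# date(1, 1, 1) == ordinal 1, and toordinal()/fromordinal() are exact
# inverses, as test_ordinal_conversions below verifies at scale:
def _demo_ordinals():
    d = date(1945, 11, 12)                        # "Calendrical Calculations"
    return date.fromordinal(d.toordinal()) == d   # True (ordinal 710347)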
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in range(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
c = self.theclass(2001,2,1)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(c - (c - day), day)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
for delta in [tiny, timedelta(1), timedelta(2)]:
dt = self.theclass.min + delta
dt -= delta # no problem
self.assertRaises(OverflowError, dt.__sub__, delta)
self.assertRaises(OverflowError, dt.__add__, -delta)
dt = self.theclass.max - delta
dt += delta # no problem
self.assertRaises(OverflowError, dt.__add__, delta)
self.assertRaises(OverflowError, dt.__sub__, -delta)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons why that check could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
self.assertTrue(today == todayagain or
abs(todayagain - today) < timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = sorted(map(int, ISO_LONG_YEARS_TABLE.split()))
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime("%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
#at least, exercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
# XXX: Should min and max respect subclassing?
if issubclass(self.theclass, datetime):
expected_class = datetime
else:
expected_class = date
self.assertIsInstance(self.theclass.min, expected_class)
self.assertIsInstance(self.theclass.max, expected_class)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
# Our class can be compared for equality to other classes
self.assertEqual(our == 1, False)
self.assertEqual(1 == our, False)
self.assertEqual(our != 1, True)
self.assertEqual(1 != our, True)
# But the ordering is undefined
self.assertRaises(TypeError, lambda: our < 1)
self.assertRaises(TypeError, lambda: 1 < our)
# Repeat those tests with a different class
class SomeClass:
pass
their = SomeClass()
self.assertEqual(our == their, False)
self.assertEqual(their == our, False)
self.assertEqual(our != their, True)
self.assertEqual(their != our, True)
self.assertRaises(TypeError, lambda: our < their)
self.assertRaises(TypeError, lambda: their < our)
# However, if the other class explicitly defines ordering
# relative to our class, it is allowed to do so
class LargerThanAnything:
def __lt__(self, other):
return False
def __le__(self, other):
return isinstance(other, LargerThanAnything)
def __eq__(self, other):
return isinstance(other, LargerThanAnything)
def __ne__(self, other):
return not isinstance(other, LargerThanAnything)
def __gt__(self, other):
return not isinstance(other, LargerThanAnything)
def __ge__(self, other):
return True
their = LargerThanAnything()
self.assertEqual(our == their, False)
self.assertEqual(their == our, False)
self.assertEqual(our != their, True)
self.assertEqual(their != our, True)
self.assertEqual(our < their, True)
self.assertEqual(their < our, False)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1000.
cls = self.theclass
self.assertEqual(cls(1000, 1, 1).strftime("%Y"), "1000")
for y in 1, 49, 51, 99, 100, 999:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle byte string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
base = b'1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in b'9', b'\0', b'\r', b'\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
# Good bytes, but bad tzinfo:
self.assertRaises(TypeError, self.theclass,
bytes([1] * len(base)), 'EST')
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + bytes([ord_byte]) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
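# Editor's sketch: combine() and the date()/time() extractors are inverses,
# the property test_combine and test_extract below check piecewise:
def _demo_combine_extract():
    dt = datetime(2002, 3, 4, 18, 45, 3, 1234)
    return datetime.combine(dt.date(), dt.time()) == dt   # True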
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
def test_strftime_with_bad_tzname_replace(self):
# verify ok if tzinfo.tzname().replace() returns a non-string
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
s = pickle.dumps(a)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
# Test whether fromtimestamp "rounds up" floats that fall less
# than 1/2 microsecond below an integer.
for fts in [self.theclass.fromtimestamp,
self.theclass.utcfromtimestamp]:
self.assertEqual(fts(0.9999999), fts(1))
self.assertEqual(fts(0.99999949).microsecond, 999999)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertTrue(abs(from_timestamp - from_now) <= tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
expected = _strptime._strptime_datetime(self.theclass, string, format)
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
self.assertIs(type(expected), self.theclass)
self.assertIs(type(got), self.theclass)
strptime = self.theclass.strptime
self.assertEqual(strptime("+0002", "%z").utcoffset(), 2 * MINUTE)
self.assertEqual(strptime("-0002", "%z").utcoffset(), -2 * MINUTE)
# Only local timezone and UTC are supported
for tzseconds, tzname in ((0, 'UTC'), (0, 'GMT'),
(-_time.timezone, _time.tzname[0])):
if tzseconds < 0:
sign = '-'
seconds = -tzseconds
else:
sign ='+'
seconds = tzseconds
hours, minutes = divmod(seconds//60, 60)
dtstr = "{}{:02d}{:02d} {}".format(sign, hours, minutes, tzname)
dt = strptime(dtstr, "%z %Z")
self.assertEqual(dt.utcoffset(), timedelta(seconds=tzseconds))
self.assertEqual(dt.tzname(), tzname)
# Can produce an inconsistent datetime
dtstr, fmt = "+1234 UTC", "%z %Z"
dt = strptime(dtstr, fmt)
self.assertEqual(dt.utcoffset(), 12 * HOUR + 34 * MINUTE)
self.assertEqual(dt.tzname(), 'UTC')
# yet will roundtrip
self.assertEqual(dt.strftime(fmt), dtstr)
# Produce naive datetime if no %z is provided
self.assertEqual(strptime("UTC", "%Z").tzinfo, None)
with self.assertRaises(ValueError): strptime("-2400", "%z")
with self.assertRaises(ValueError): strptime("-000", "%z")
def test_more_timetuple(self):
# This tests fields beyond those tested by TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
# This tests fields beyond those tested by TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
self.assertRaises(TypeError, combine, d, "time") # wrong type
self.assertRaises(TypeError, combine, "date", t) # wrong type
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
# Pretty boring! The TZ test is more interesting here. astimezone()
# simply can't be applied to a naive object.
dt = self.theclass.now()
f = FixedOffset(44, "")
self.assertRaises(TypeError, dt.astimezone) # not enough args
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
self.assertRaises(ValueError, dt.astimezone, f) # naive
self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
self.assertRaises(ValueError,
dt.replace(tzinfo=bog).astimezone, f)
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
class TestSubclassDateTime(TestDateTime):
theclass = SubclassDatetime
# Override tests not designed for subclass
def test_roundtrip(self):
pass
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_bool(self):
cls = self.theclass
self.assertTrue(cls(1))
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
self.assertTrue(not cls(0))
self.assertTrue(not cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
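# Concretely, TestTimeTZ below mixes this in with theclass = time, and
# TestDateTimeTZ mixes it in with theclass = datetime.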
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
return timedelta(minutes = dt and 42 or -42)
dst = utcoffset
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.assertTrue(t.tzinfo is b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.assertTrue(t.utcoffset() is None)
self.assertTrue(t.dst() is None)
self.assertTrue(t.tzname() is None)
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of minutes.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(seconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
for op in lt, le, gt, ge, eq, ne:
got = op(x, y)
expected = op(x.minute, y.minute)
self.assertEqual(got, expected)
# However, if they're different members, utcoffset is not ignored.
# Note that a time can't actually have an operand-dependent offset,
# though (and time.utcoffset() passes None to tzinfo.utcoffset()),
# so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = (x > y) - (x < y)
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.assertTrue(t.tzinfo is None)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertTrue(t4.tzinfo is None)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertTrue(t4.utcoffset() is None)
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertTrue(t4.tzname() is None)
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.assertTrue(t4.dst() is None)
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
tz = 42
def tzname(self, dt): return self.tz
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
# Issue #6697:
if '_Fast' in str(type(self)):
Badtzname.tz = '\ud800'
self.assertRaises(ValueError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
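# Both name the same UTC instant: 00:01:02.000003 at +23:59 and
# 00:00:02.000003 at +23:58 each normalize to 00:02:02.000003 UTC,
# so equal objects must hash equal.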
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# Test cases with non-None tzinfo.
cls = self.theclass
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.assertTrue(not t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(not t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(t)
# But this should yield a value error -- the utcoffset is bogus.
t = cls(0, tzinfo=FixedOffset(24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
# Likewise.
t = cls(0, tzinfo=FixedOffset(-24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = time(1, 2, 3)
t2 = time(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In time w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.assertTrue(t1 < t2)
self.assertTrue(t1 != t2)
self.assertTrue(t2 > t1)
self.assertEqual(t1, t1)
self.assertEqual(t2, t2)
# Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
self.assertEqual(t1, t2)
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.assertTrue(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.assertTrue(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.assertTrue(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.assertTrue(t1 > t2)
# Make t2 naive and it should fail.
t2 = self.theclass.min
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
# Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertTrue(nowaware.tzinfo is tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.assertTrue(nowaware.tzinfo is tz55)
nowawareplus2 = delta + nowaware
self.assertTrue(nowawareplus2.tzinfo is tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertTrue(diff.tzinfo is tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertTrue(nowawareplus.tzinfo is tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
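# Illustrative numbers: nowaware sits at UTC-5:30 (-330 minutes); if
# the random tzr happened to be UTC+1:00, expected would come out as
# 60 - (-330) minutes - delta, i.e. 390 minutes - delta.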
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
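# That is, min's +23:59 offset places it 1439 minutes earlier in UTC
# and max's -23:59 offset places it 1439 minutes later, so the naive
# span grows by 2*1439 minutes.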
# Different tzinfo, but the same offset
tza = timezone(HOUR, 'A')
tzb = timezone(HOUR, 'B')
delta = min.replace(tzinfo=tza) - max.replace(tzinfo=tzb)
self.assertEqual(delta, self.theclass.min - self.theclass.max)
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
utc = FixedOffset(0, "utc", 0)
for weirdtz in [FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0),
timezone(timedelta(hours=15, minutes=58), "weirdtz"),]:
for dummy in range(3):
now = datetime.now(weirdtz)
self.assertTrue(now.tzinfo is weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
# But on some flavor of Mac, it's nowhere near that. So we can't have
# any idea here what time that actually is, we can only test that
# relative changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue=0):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
self.assertEqual(11, t.tm_hour) # 20min + 53min = 1h 13min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
# Ensure tm_isdst is 0 regardless of what dst() says: DST
# is never in effect for a UTC time.
self.assertEqual(0, t.tm_isdst)
# For naive datetime, utctimetuple == timetuple except for isdst
d = cls(1, 2, 3, 10, 20, 30, 40)
t = d.utctimetuple()
self.assertEqual(t[:-1], d.timetuple()[:-1])
self.assertEqual(0, t.tm_isdst)
# Same if utcoffset is None
class NOFS(DST):
def utcoffset(self, dt):
return None
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=NOFS())
t = d.utctimetuple()
self.assertEqual(t[:-1], d.timetuple()[:-1])
self.assertEqual(0, t.tm_isdst)
# Check that bad tzinfo is detected
class BOFS(DST):
def utcoffset(self, dt):
return "EST"
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=BOFS())
self.assertRaises(TypeError, d.utctimetuple)
# Check that utctimetuple() is the same as
# astimezone(utc).timetuple()
d = cls(2010, 11, 13, 14, 15, 16, 171819)
for tz in [timezone.min, timezone.utc, timezone.max]:
dtz = d.replace(tzinfo=tz)
self.assertEqual(dtz.utctimetuple()[:-1],
dtz.astimezone(timezone.utc).timetuple()[:-1])
# At the edges, UTC adjustment can produce years out-of-range
# for a datetime object. Ensure that an OverflowError is
# raised.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
self.assertRaises(OverflowError, tiny.utctimetuple)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
self.assertRaises(OverflowError, huge.utctimetuple)
# More overflow cases
tiny = cls.min.replace(tzinfo=timezone(MINUTE))
self.assertRaises(OverflowError, tiny.utctimetuple)
huge = cls.max.replace(tzinfo=timezone(-MINUTE))
self.assertRaises(OverflowError, huge.utctimetuple)
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(d.isoformat('\u1234'), datestr + '\u1234' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.assertTrue(dt.tzinfo is f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.assertTrue(x.tzinfo is f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.assertTrue(got.tzinfo is fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.assertTrue(got.tzinfo is expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
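# e.g. April 1, 2002 was a Monday (weekday 0), so
# first_sunday_on_or_after(datetime(2002, 4, 1, 2)) is
# datetime(2002, 4, 7, 2).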
ZERO = timedelta(0)
MINUTE = timedelta(minutes=1)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
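# For 2002 these rules put DST between 2002-04-07 02:00 and
# 2002-10-27 01:00 standard time (see TestTimezoneConversions.dston
# and .dstoff below).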
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
# Although dt was considered to be in daylight
# time, there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
# class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
# Sometimes blows up. In the following, the tzinfo.dst()
# implementation may return None or not None depending on
# whether DST is assumed to be in effect. In this situation,
# a ValueError should be raised by astimezone().
class tricky_notok(ok):
def dst(self, dt):
if dt.year == 2000:
return None
else:
return 10*HOUR
dt = self.theclass(2001, 1, 1).replace(tzinfo=utc_real)
self.assertRaises(ValueError, dt.astimezone, tricky_notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertTrue(not as_date == as_datetime)
self.assertTrue(not as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertEqual(as_date.__eq__(as_datetime), True)
different_day = (as_date.day + 1) % 20 + 1
as_different = as_datetime.replace(day= different_day)
self.assertEqual(as_date.__eq__(as_different), False)
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
magnucki/gitinspector | gitinspector/interval.py | 50 | 1220 | # coding: utf-8
#
# Copyright © 2012-2013 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
__since__ = ""
__until__ = ""
__ref__ = "HEAD"
def has_interval():
return __since__ + __until__ != ""
def get_since():
return __since__
def set_since(since):
global __since__
__since__ = "--since=\"" + since + "\" "
def get_until():
return __until__
def set_until(until):
global __until__
__until__ = "--until=\"" + until + "\" "
def get_ref():
return __ref__
def set_ref(ref):
global __ref__
__ref__ = ref
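# Hedged usage sketch (illustration only, not part of gitinspector): the
# setters build shell-quoted git arguments, e.g.
#   set_since("2013-01-01")  ->  __since__ == '--since="2013-01-01" '
#   set_until("2013-12-31")  ->  __until__ == '--until="2013-12-31" '
#   has_interval()           ->  True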
| gpl-3.0 |
mpvismer/pyqtgraph | pyqtgraph/tests/test_exit_crash.py | 29 | 1182 | import os, sys, subprocess, tempfile
import pyqtgraph as pg
import six
import pytest
code = """
import sys
sys.path.insert(0, '{path}')
import pyqtgraph as pg
app = pg.mkQApp()
w = pg.{classname}({args})
"""
skipmessage = ('unclear why this test is failing. skipping until someone has'
' time to fix it')
@pytest.mark.skipif(True, reason=skipmessage)
def test_exit_crash():
# For each Widget subclass, run a simple python script that creates an
# instance and then shuts down. The intent is to check for segmentation
# faults when each script exits.
tmp = tempfile.mktemp(".py")
path = os.path.dirname(pg.__file__)
initArgs = {
'CheckTable': "[]",
'ProgressDialog': '"msg"',
'VerticalLabel': '"msg"',
}
for name in dir(pg):
obj = getattr(pg, name)
if not isinstance(obj, type) or not issubclass(obj, pg.QtGui.QWidget):
continue
print(name)
argstr = initArgs.get(name, "")
        with open(tmp, 'w') as f:
            f.write(code.format(path=path, classname=name, args=argstr))
proc = subprocess.Popen([sys.executable, tmp])
assert proc.wait() == 0
os.remove(tmp)
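# Hedged illustration: for name 'ProgressDialog' the rendered script is
# (with {path} filled in by code.format):
#   import sys
#   sys.path.insert(0, '<path to pyqtgraph>')
#   import pyqtgraph as pg
#   app = pg.mkQApp()
#   w = pg.ProgressDialog("msg")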
| mit |
uni-peter-zheng/tp-qemu | qemu/tests/virtio_scsi_mq.py | 2 | 8932 | import logging
import re
from autotest.client.shared import error
from autotest.client import local_host
from virttest import utils_misc
from virttest import env_process
from virttest import qemu_qtree
@error.context_aware
def run(test, params, env):
"""
Qemu multiqueue test for virtio-scsi controller:
    1) Boot up a guest with a virtio-scsi device which supports multi-queue;
       the vcpu and image counts of the guest should match the multi-queue number
2) Check the multi queue option from monitor
3) Check device init status in guest
4) Load I/O in all targets
5) Check the interrupt queues in guest
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def proc_interrupts_results(results):
results_dict = {}
cpu_count = 0
cpu_list = []
for line in results.splitlines():
line = line.strip()
if re.match("CPU0", line):
cpu_list = re.findall("CPU\d+", line)
cpu_count = len(cpu_list)
continue
if cpu_count > 0:
irq_key = re.split(":", line)[0]
results_dict[irq_key] = {}
content = line[len(irq_key) + 1:].strip()
if len(re.split("\s+", content)) < cpu_count:
continue
count = 0
irq_des = ""
for irq_item in re.split("\s+", content):
if count < cpu_count:
if count == 0:
results_dict[irq_key]["count"] = []
results_dict[irq_key]["count"].append(irq_item)
else:
irq_des += " %s" % irq_item
count += 1
results_dict[irq_key]["irq_des"] = irq_des.strip()
return results_dict, cpu_list
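    # Hedged illustration of proc_interrupts_results(), based on the parsing
    # above. Given a /proc/interrupts snippet such as:
    #            CPU0       CPU1
    #   24:       100        200   PCI-MSI-edge  virtio0-request
    # it returns:
    #   ({'24': {'count': ['100', '200'],
    #            'irq_des': 'PCI-MSI-edge virtio0-request'}},
    #    ['CPU0', 'CPU1'])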
timeout = float(params.get("login_timeout", 240))
host_cpu_num = local_host.LocalHost().get_num_cpu()
while host_cpu_num:
num_queues = str(host_cpu_num)
host_cpu_num &= host_cpu_num - 1
params['smp'] = num_queues
params['num_queues'] = num_queues
images_num = int(num_queues)
extra_image_size = params.get("image_size_extra_images", "512M")
system_image = params.get("images")
system_image_drive_format = params.get("system_image_drive_format", "ide")
params["drive_format_%s" % system_image] = system_image_drive_format
dev_type = params.get("dev_type", "i440FX-pcihost")
error.context("Boot up guest with block devcie with num_queues"
" is %s and smp is %s" % (num_queues, params['smp']),
logging.info)
for vm in env.get_all_vms():
if vm.is_alive():
vm.destroy()
for extra_image in range(images_num):
image_tag = "stg%s" % extra_image
params["images"] += " %s" % image_tag
params["image_name_%s" % image_tag] = "images/%s" % image_tag
params["image_size_%s" % image_tag] = extra_image_size
params["force_create_image_%s" % image_tag] = "yes"
image_params = params.object_params(image_tag)
env_process.preprocess_image(test, image_params, image_tag)
params["start_vm"] = "yes"
vm = env.get_vm(params["main_vm"])
env_process.preprocess_vm(test, params, env, vm.name)
session = vm.wait_for_login(timeout=timeout)
error.context("Check irqbalance service status", logging.info)
output = session.cmd_output("systemctl status irqbalance")
if not re.findall("Active: active", output):
session.cmd("systemctl start irqbalance")
output = session.cmd_output("systemctl status irqbalance")
output = utils_misc.strip_console_codes(output)
if not re.findall("Active: active", output):
raise error.TestNAError("Can not start irqbalance inside guest. "
"Skip this test.")
error.context("Pin vcpus to host cpus", logging.info)
host_numa_nodes = utils_misc.NumaInfo()
vcpu_num = 0
for numa_node_id in host_numa_nodes.nodes:
numa_node = host_numa_nodes.nodes[numa_node_id]
for _ in range(len(numa_node.cpus)):
if vcpu_num >= len(vm.vcpu_threads):
break
vcpu_tid = vm.vcpu_threads[vcpu_num]
logging.debug("pin vcpu thread(%s) to cpu"
"(%s)" % (vcpu_tid,
numa_node.pin_cpu(vcpu_tid)))
vcpu_num += 1
error.context("Verify num_queues from monitor", logging.info)
qtree = qemu_qtree.QtreeContainer()
try:
qtree.parse_info_qtree(vm.monitor.info('qtree'))
except AttributeError:
raise error.TestNAError("Monitor deson't supoort qtree "
"skip this test")
error_msg = "Number of queues mismatch: expect %s"
error_msg += " report from monitor: %s(%s)"
scsi_bus_addr = ""
for qdev in qtree.get_qtree().get_children():
if qdev.qtree["type"] == dev_type:
for pci_bus in qdev.get_children():
for pcic in pci_bus.get_children():
if pcic.qtree["class_name"] == "SCSI controller":
qtree_queues = pcic.qtree["num_queues"].split("(")[0]
if qtree_queues.strip() != num_queues.strip():
error_msg = error_msg % (num_queues,
qtree_queues,
pcic.qtree["num_queues"])
raise error.TestFail(error_msg)
if pcic.qtree["class_name"] == "SCSI controller":
scsi_bus_addr = pcic.qtree['addr']
break
if not scsi_bus_addr:
raise error.TestError("Didn't find addr from qtree. Please check "
"the log.")
error.context("Check device init status in guest", logging.info)
init_check_cmd = params.get("init_check_cmd", "dmesg | grep irq")
output = session.cmd_output(init_check_cmd)
irqs_pattern = params.get("irqs_pattern", "%s:\s+irq\s+(\d+)")
irqs_pattern = irqs_pattern % scsi_bus_addr
irqs_watch = re.findall(irqs_pattern, output)
    # There are several interrupt counters for a virtio device: config,
    # control, event and request. Each queue has its own request counter,
    # so the total count for the virtio device should equal the number of
    # queues plus three.
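    # e.g. with num_queues == 4 the guest should expose 3 + 4 == 7
    # interrupt lines for this virtio-scsi device in dmesg.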
if len(irqs_watch) != 3 + int(num_queues):
raise error.TestFail("Failed to check the interrupt ids from dmesg")
irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts")
output = session.cmd_output(irq_check_cmd)
irq_results, _ = proc_interrupts_results(output)
for irq_watch in irqs_watch:
if irq_watch not in irq_results:
raise error.TestFail("Can't find irq %s from procfs" % irq_watch)
error.context("Load I/O in all targets", logging.info)
get_dev_cmd = params.get("get_dev_cmd", "ls /dev/[svh]d*")
output = session.cmd_output(get_dev_cmd)
system_dev = re.findall("[svh]d(\w+)\d+", output)[0]
dd_timeout = int(re.findall("\d+", extra_image_size)[0])
fill_cmd = ""
count = 0
for dev in re.split("\s+", output):
if not dev:
continue
if not re.findall("[svh]d%s" % system_dev, dev):
fill_cmd += " dd of=%s if=/dev/urandom bs=1M " % dev
fill_cmd += "count=%s &&" % dd_timeout
count += 1
if count != images_num:
raise error.TestError("Disks are not all show up in system. Output "
"from the check command: %s" % output)
fill_cmd = fill_cmd.rstrip("&&")
session.cmd(fill_cmd, timeout=dd_timeout)
error.context("Check the interrupt queues in guest", logging.info)
output = session.cmd_output(irq_check_cmd)
irq_results, cpu_list = proc_interrupts_results(output)
irq_bit_map = 0
for irq_watch in irqs_watch:
if "request" in irq_results[irq_watch]["irq_des"]:
for index, count in enumerate(irq_results[irq_watch]["count"]):
if int(count) > 0:
irq_bit_map |= 2 ** index
cpu_count = 0
error_msg = ""
cpu_not_used = []
for index, cpu in enumerate(cpu_list):
if 2 ** index & irq_bit_map != 2 ** index:
cpu_not_used.append(cpu)
if cpu_not_used:
logging.debug("Interrupt info from procfs:\n%s" % output)
error_msg = " ".join(cpu_not_used)
if len(cpu_not_used) > 1:
error_msg += " are"
else:
error_msg += " is"
error_msg += " not used during test. Please check debug log for"
error_msg += " more information."
raise error.TestFail(error_msg)
| gpl-2.0 |
Livit/Livit.Learn.EdX | common/djangoapps/heartbeat/views.py | 199 | 1440 | from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
"""
Simple view that a loadbalancer can check to verify that the app is up. Returns a json doc
of service id: status or message. If the status for any service is anything other than True,
it returns HTTP code 503 (Service Unavailable); otherwise, it returns 200.
"""
    # This refactoring merely delegates to the default modulestore (which, if it's a mixed modulestore,
    # will delegate to all configured modulestores) and a quick test of SQL. A later refactoring may
    # allow any service to register itself as participating in the heartbeat. It's important that all
    # implementations do as little as possible but give a sound determination that they are ready.
try:
output = modulestore().heartbeat()
except HeartbeatFailure as fail:
return JsonResponse({fail.service: unicode(fail)}, status=503)
cursor = connection.cursor()
try:
cursor.execute("SELECT CURRENT_DATE")
cursor.fetchone()
output['SQL'] = True
except DatabaseError as fail:
return JsonResponse({'SQL': unicode(fail)}, status=503)
return JsonResponse(output)
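# Hedged illustration (the URL route is an assumption, not defined in this
# module): a load balancer would poll e.g. GET /heartbeat and take the app
# out of rotation on any 503, keeping it in rotation on 200.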
| agpl-3.0 |
DrSLDR/consilium | consilium/consilium/settings.py | 1 | 3675 | """
Django settings for consilium project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Master version thing
VERSION = '0.2.3+89'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^=nm!n%!=b0!pl$!^x7@c_xr*%s=_l$ade6f%7rl1cr8j1@d5&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'login',
'speaker',
'userimport',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'consilium.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'static'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'apptemplates.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
WSGI_APPLICATION = 'consilium.wsgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'asgiref.inmemory.ChannelLayer',
'ROUTING': 'consilium.routing.channel_routing',
},
}
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Stockholm'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| gpl-3.0 |
wangming28/syzygy | third_party/numpy/files/numpy/lib/tests/test_ufunclike.py | 35 | 1924 | from numpy.testing import *
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing.decorators import deprecated
class TestUfunclike(TestCase):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([True, False, False, False, False, False])
res = ufl.isposinf(a)
assert_equal(res, tgt)
res = ufl.isposinf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([False, True, False, False, False, False])
res = ufl.isneginf(a)
assert_equal(res, tgt)
res = ufl.isneginf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
tgt = nx.array([[ 1., 1., 1., 1.], [-1., -1., -1., -1.]])
res = ufl.fix(a)
assert_equal(res, tgt)
res = ufl.fix(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
assert_equal(ufl.fix(3.14), 3)
def test_fix_with_subclass(self):
class MyArray(nx.ndarray):
def __new__(cls, data, metadata=None):
res = nx.array(data, copy=True).view(cls)
res.metadata = metadata
return res
def __array_wrap__(self, obj, context=None):
obj.metadata = self.metadata
return obj
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
assert_array_equal(f, nx.array([1,-1]))
assert_(isinstance(f, MyArray))
assert_equal(f.metadata, 'foo')
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
vaygr/ansible | lib/ansible/plugins/lookup/first_found.py | 20 | 4304 | # (c) 2013, seth vidal <[email protected]> red hat, inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: first_found
author: Seth Vidal <[email protected]>
version_added: historical
short_description: return first file found from list
description:
- this lookup checks a list of files and paths and returns the full path to the first combination found.
options:
_terms:
description: list of file names
required: True
paths:
description: list of paths in which to look for the files
"""
EXAMPLES = """
- name: show first existing file
debug: var=item
with_first_found:
- "/path/to/foo.txt"
- "bar.txt" # will be looked in files/ dir relative to play or in role
- "/path/to/biz.txt"
- name: copy first existing file found to /some/file
copy: src={{item}} dest=/some/file
with_first_found:
- foo
- "{{inventory_hostname}}
- bar
- name: same copy but specific paths
copy: src={{item}} dest=/some/file
with_first_found:
- files:
- foo
- "{{inventory_hostname}}
- bar
paths:
- /tmp/production
- /tmp/staging
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
template:
src: "{{ item }}"
dest: "/etc/foo.conf"
with_first_found:
- "{{ ansible_virtualization_type }}_foo.conf"
- "default_foo.conf"
"""
RETURN = """
_raw:
description:
- path to file found
"""
import os
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleFileNotFound, AnsibleLookupError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = boolean(term.get('skip', False), strict=False)
filelist = files
if isinstance(files, string_types):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, string_types):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = self._flatten(terms)
for fn in total_search:
try:
fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError):
continue
# get subdir if set by task executor, default to files otherwise
subdir = getattr(self, '_subdir', 'files')
path = None
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
if path is not None:
return [path]
if skip:
return []
raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
"files are found")
| gpl-3.0 |
GodBlessPP/W17test_2nd_2 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/xmlreader.py | 824 | 12612 | """An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
from . import handler
from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must raise a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
from . import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer:
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
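# Hedged usage sketch of the incremental interface above (the driver
# factory is hypothetical; any IncrementalParser subclass would do):
#   parser = make_incremental_driver()
#   with open("doc.xml", "rb") as f:
#       for chunk in iter(lambda: f.read(2 ** 16), b""):
#           parser.feed(chunk)
#   parser.close()
#   parser.reset()   # required before parsing the next document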
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
        must be a Python 2.0 Unicode-wrapped file-like object that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
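# Hedged usage sketch of InputSource, per the docstrings above
# ('reader' stands for any XMLReader driver):
#   source = InputSource("http://example.com/doc.xml")
#   source.setEncoding("utf-8")
#   source.setByteStream(open("doc.xml", "rb"))  # preferred over the URI
#   reader.parse(source)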
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getQNameByName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getNames(self):
return list(self._attrs.keys())
def getQNames(self):
return list(self._attrs.keys())
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return list(self._attrs.keys())
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return list(self._attrs.items())
def values(self):
return list(self._attrs.values())
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError(name)
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError(name)
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return list(self._qnames.values())
def copy(self):
return self.__class__(self._attrs, self._qnames)
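# Hedged illustration of the NS-aware mapping described above:
#   uri = "http://www.w3.org/XML/1998/namespace"
#   a = AttributesNSImpl({(uri, "lang"): "en"}, {(uri, "lang"): "xml:lang"})
#   a.getValue((uri, "lang"))        ->  "en"
#   a.getValueByQName("xml:lang")    ->  "en"
#   a.getQNameByName((uri, "lang"))  ->  "xml:lang"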
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
| gpl-3.0 |
emedinaa/contentbox | third_party/oauthlib/oauth1/rfc5849/endpoints/request_token.py | 6 | 8904 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.request_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the request token provider logic of
OAuth 1.0 RFC 5849. It validates the correctness of request token requests,
creates and persists tokens as well as create the proper response to be
returned to the client.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import log, urlencode
from .base import BaseEndpoint
from .. import errors
class RequestTokenEndpoint(BaseEndpoint):
"""An endpoint responsible for providing OAuth 1 request tokens.
Typical use is to instantiate with a request validator and invoke the
``create_request_token_response`` from a view function. The tuple returned
has all information necessary (body, status, headers) to quickly form
and return a proper response. See :doc:`/oauth1/validator` for details on which
validator methods to implement for this endpoint.
"""
def create_request_token(self, request, credentials):
"""Create and save a new request token.
:param request: An oauthlib.common.Request object.
:param credentials: A dict of extra token credentials.
:returns: The token as an urlencoded string.
"""
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
'oauth_callback_confirmed': 'true'
}
token.update(credentials)
self.request_validator.save_request_token(token, request)
return urlencode(token.items())
def create_request_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import RequestTokenEndpoint
>>> endpoint = RequestTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_request_token_response(
... 'https://your.provider/request_token?foo=bar',
... headers={
... 'Authorization': 'OAuth realm=movies user, oauth_....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_callback_confirmed=true&my_specific=argument'
>>> s
200
        A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+callback+uri'
>>> s
400
        The same goes for an unauthorized request::
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_request_token_request(
request)
if valid:
token = self.create_request_token(request, credentials or {})
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code
def validate_request_token_request(self, request):
"""Validate a request token request.
:param request: An oauthlib.common.Request object.
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object.
"""
self._check_transport_security(request)
self._check_mandatory_parameters(request)
if request.realm:
request.realms = request.realm.split(' ')
else:
request.realms = self.request_validator.get_default_realms(
request.client_key, request)
if not self.request_validator.check_realms(request.realms):
raise errors.InvalidRequestError(
description='Invalid realm %s. Allowed are %r.' % (
request.realms, self.request_validator.realms))
if not request.redirect_uri:
raise errors.InvalidRequestError(
description='Missing callback URI.')
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
request_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
        # Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_requested_realms(
request.client_key, request.realms, request)
# Callback is normally never required, except for requests for
# a Temporary Credential as described in `Section 2.1`_
# .._`Section 2.1`: http://tools.ietf.org/html/rfc5849#section-2.1
valid_redirect = self.request_validator.validate_redirect_uri(
request.client_key, request.redirect_uri, request)
if not request.redirect_uri:
raise NotImplementedError('Redirect URI must either be provided '
'or set to a default during validation.')
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_realm, valid_redirect, valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s.", valid_client)
log.info("Valid realm: %s.", valid_realm)
log.info("Valid callback: %s.", valid_redirect)
log.info("Valid signature: %s.", valid_signature)
return v, request
| apache-2.0 |
galaxyproject/tools-iuc | tools/humann/customizemapping.py | 5 | 1781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from pathlib import Path
if __name__ == '__main__':
# Read command line
parser = argparse.ArgumentParser(description='Customize HUMAnN utility mapping')
parser.add_argument('--in_mapping', help="Path to mapping file to reduce")
parser.add_argument('--features', help="Path to tabular file with features to keep in first column")
parser.add_argument('--elements', help="Path to tabular file with elements to keep in other columns")
parser.add_argument('--out_mapping', help="Path to reduced mapping file")
args = parser.parse_args()
in_mapping_fp = Path(args.in_mapping)
feature_fp = Path(args.features)
element_fp = Path(args.elements)
out_mapping_fp = Path(args.out_mapping)
# extract features to keep
features = set()
with open(feature_fp, 'r') as feature_f:
for line in feature_f.readlines():
            features.add(line.rstrip("\n").split("\t")[0])
print(features)
# extract elements to keep
elements = set()
with open(element_fp, 'r') as element_f:
for line in element_f.readlines():
            elements.add(line.rstrip("\n").split("\t")[0])
print(elements)
# write mapping for features to keep while keeping only elements
with open(in_mapping_fp, 'r') as in_mapping_f:
with open(out_mapping_fp, 'w') as out_mapping_f:
for line in in_mapping_f.readlines():
                l_split = line.rstrip("\n").split("\t")
feat = l_split[0]
if feat in features:
to_write = [feat]
for e in l_split[1:]:
if e in elements:
to_write.append(e)
out_mapping_f.write("%s\n" % '\t'.join(to_write))
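# Hedged illustration (the layout is an assumption based on the parsing
# above): each mapping line is '<feature>\t<element>\t<element>...', and a
# line survives only if its feature is listed in --features, keeping only
# the elements listed in --elements.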
| mit |
ChanduERP/odoo | addons/account/__openerp__.py | 190 | 7542 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'eInvoicing',
'version' : '1.1',
'author' : 'OpenERP SA',
'category' : 'Accounting & Finance',
'description' : """
Accounting and Financial Management.
====================================
Financial and accounting module that covers:
--------------------------------------------
* General Accounting
* Cost/Analytic accounting
* Third party accounting
* Taxes management
* Budgets
* Customer and Supplier Invoices
* Bank statements
* Reconciliation process by partner
Creates a dashboard for accountants that includes:
--------------------------------------------------
* List of Customer Invoices to Approve
* Company Analysis
* Graph of Treasury
Processes like maintaining general ledgers are done through the defined Financial Journals (entry move lines or groupings are maintained through a journal)
for a particular financial year; for the preparation of vouchers there is a module named account_voucher.
""",
'website': 'https://www.odoo.com/page/billing',
'depends' : ['base_setup', 'product', 'analytic', 'board', 'edi', 'report'],
'data': [
'security/account_security.xml',
'security/ir.model.access.csv',
'account_menuitem.xml',
'report/account_invoice_report_view.xml',
'report/account_entries_report_view.xml',
'report/account_treasury_report_view.xml',
'report/account_report_view.xml',
'report/account_analytic_entries_report_view.xml',
'wizard/account_move_bank_reconcile_view.xml',
'wizard/account_use_model_view.xml',
'account_installer.xml',
'wizard/account_period_close_view.xml',
'wizard/account_reconcile_view.xml',
'wizard/account_unreconcile_view.xml',
'wizard/account_statement_from_invoice_view.xml',
'account_view.xml',
'account_report.xml',
'account_financial_report_data.xml',
'wizard/account_report_common_view.xml',
'wizard/account_invoice_refund_view.xml',
'wizard/account_fiscalyear_close_state.xml',
'wizard/account_chart_view.xml',
'wizard/account_tax_chart_view.xml',
'wizard/account_move_line_reconcile_select_view.xml',
'wizard/account_open_closed_fiscalyear_view.xml',
'wizard/account_move_line_unreconcile_select_view.xml',
'wizard/account_vat_view.xml',
'wizard/account_report_print_journal_view.xml',
'wizard/account_report_general_journal_view.xml',
'wizard/account_report_central_journal_view.xml',
'wizard/account_subscription_generate_view.xml',
'wizard/account_fiscalyear_close_view.xml',
'wizard/account_state_open_view.xml',
'wizard/account_journal_select_view.xml',
'wizard/account_change_currency_view.xml',
'wizard/account_validate_move_view.xml',
'wizard/account_report_general_ledger_view.xml',
'wizard/account_invoice_state_view.xml',
'wizard/account_report_partner_balance_view.xml',
'wizard/account_report_account_balance_view.xml',
'wizard/account_report_aged_partner_balance_view.xml',
'wizard/account_report_partner_ledger_view.xml',
'wizard/account_reconcile_partner_process_view.xml',
'wizard/account_automatic_reconcile_view.xml',
'wizard/account_financial_report_view.xml',
'wizard/pos_box.xml',
'project/wizard/project_account_analytic_line_view.xml',
'account_end_fy.xml',
'account_invoice_view.xml',
'data/account_data.xml',
'data/data_account_type.xml',
'data/configurable_account_chart.xml',
'account_invoice_workflow.xml',
'project/project_view.xml',
'project/project_report.xml',
'project/wizard/account_analytic_balance_report_view.xml',
'project/wizard/account_analytic_cost_ledger_view.xml',
'project/wizard/account_analytic_inverted_balance_report.xml',
'project/wizard/account_analytic_journal_report_view.xml',
'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
'project/wizard/account_analytic_chart_view.xml',
'partner_view.xml',
'product_view.xml',
'account_assert_test.xml',
'ir_sequence_view.xml',
'company_view.xml',
'edi/invoice_action_data.xml',
'account_bank_view.xml',
'res_config_view.xml',
'account_pre_install.yml',
'views/report_vat.xml',
'views/report_invoice.xml',
'views/report_trialbalance.xml',
'views/report_centraljournal.xml',
'views/report_overdue.xml',
'views/report_generaljournal.xml',
'views/report_journal.xml',
'views/report_salepurchasejournal.xml',
'views/report_partnerbalance.xml',
'views/report_agedpartnerbalance.xml',
'views/report_partnerledger.xml',
'views/report_partnerledgerother.xml',
'views/report_financial.xml',
'views/report_generalledger.xml',
'project/views/report_analyticbalance.xml',
'project/views/report_analyticjournal.xml',
'project/views/report_analyticcostledgerquantity.xml',
'project/views/report_analyticcostledger.xml',
'project/views/report_invertedanalyticbalance.xml',
'views/account.xml',
],
'qweb' : [
"static/src/xml/account_move_reconciliation.xml",
"static/src/xml/account_move_line_quickadd.xml",
"static/src/xml/account_bank_statement_reconciliation.xml",
],
'demo': [
'demo/account_demo.xml',
'project/project_demo.xml',
'project/analytic_account_demo.xml',
'demo/account_minimal.xml',
'demo/account_invoice_demo.xml',
'demo/account_bank_statement.xml',
'account_unit_test.xml',
],
'test': [
'test/account_test_users.yml',
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_change_currency.yml',
'test/chart_of_account.yml',
'test/account_period_close.yml',
'test/account_use_model.yml',
'test/account_validate_account_move.yml',
'test/test_edi_invoice.yml',
'test/account_report.yml',
'test/analytic_hierarchy.yml',
'test/account_fiscalyear_close.yml', #last test, as it will definitively close the demo fiscalyear
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AOSP-S4-KK/platform_external_chromium_org | build/android/pylib/instrumentation/test_package.py | 61 | 1121 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
import os
from pylib.utils import apk_helper
import test_jar
class TestPackage(test_jar.TestJar):
def __init__(self, apk_path, jar_path):
test_jar.TestJar.__init__(self, jar_path)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._apk_path = apk_path
self._apk_name = os.path.splitext(os.path.basename(apk_path))[0]
self._package_name = apk_helper.GetPackageName(self._apk_path)
def GetApkPath(self):
"""Returns the absolute path to the APK."""
return self._apk_path
def GetApkName(self):
"""Returns the name of the apk without the suffix."""
return self._apk_name
def GetPackageName(self):
"""Returns the package name of this APK."""
return self._package_name
# Override.
def Install(self, adb):
adb.ManagedInstall(self.GetApkPath(), package_name=self.GetPackageName())
| bsd-3-clause |
atruberg/django-custom | django/views/decorators/clickjacking.py | 550 | 1759 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
"""
Modifies a view function so its response has the X-Frame-Options HTTP
header set to 'DENY' as long as the response doesn't already have that
header set.
e.g.
@xframe_options_deny
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
if resp.get('X-Frame-Options', None) is None:
resp['X-Frame-Options'] = 'DENY'
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def xframe_options_sameorigin(view_func):
"""
Modifies a view function so its response has the X-Frame-Options HTTP
header set to 'SAMEORIGIN' as long as the response doesn't already have
that header set.
e.g.
@xframe_options_sameorigin
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
if resp.get('X-Frame-Options', None) is None:
resp['X-Frame-Options'] = 'SAMEORIGIN'
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def xframe_options_exempt(view_func):
"""
Modifies a view function by setting a response variable that instructs
XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.
e.g.
@xframe_options_exempt
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
resp.xframe_options_exempt = True
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
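# Hedged sketch (not part of this module): the same decorators can also be
# applied to class-based views via Django's method_decorator, e.g.
#   from django.utils.decorators import method_decorator
#   class MyView(View):
#       @method_decorator(xframe_options_deny)
#       def dispatch(self, *args, **kwargs):
#           return super(MyView, self).dispatch(*args, **kwargs)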
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/packages/requests/adapters.py | 205 | 16799 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
        if proxy not in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
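    # Hedged illustration of get_connection() above: with
    #   proxies = {'http': 'http://10.0.0.1:3128'}
    # an 'http://...' URL is routed through the cached ProxyManager for
    # that proxy, while an 'https://...' URL (no 'https' key) falls back
    # to self.poolmanager.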
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url = urldefragauth(request.url)
else:
url = request.path_url
return url
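# --- Illustrative sketch (added; not from the original source) ---
# request_url() chooses between the absolute URL and the bare path; `req`
# below is a hypothetical PreparedRequest for http://httpbin.org/get?x=1:
#
#   adapter.request_url(req, {'http': 'http://10.0.0.1:3128'})
#   # -> 'http://httpbin.org/get?x=1'  (proxied plain HTTP needs the full URL)
#   adapter.request_url(req, {})
#   # -> '/get?x=1'                    (direct requests use only the path)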
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
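# --- Illustrative sketch (added) ---
# proxy_headers() only emits a header when the proxy URL embeds credentials;
# the value below assumes _basic_auth_str base64-encodes 'user:secret':
#
#   adapter.proxy_headers('http://user:[email protected]:3128')
#   # -> {'Proxy-Authorization': 'Basic dXNlcjpzZWNyZXQ='}
#   adapter.proxy_headers('http://proxy.local:3128')
#   # -> {}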
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
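# Illustrative note (added): both accepted timeout forms normalize to a
# TimeoutSauce, e.g.
#   adapter.send(req, timeout=5.0)        # connect=5.0, read=5.0
#   adapter.send(req, timeout=(3.1, 27))  # connect=3.1, read=27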
# Send the request.
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
| agpl-3.0 |
ActiveState/code | recipes/Python/576869_Longest_commsubsequence_problem/recipe-576869.py | 2 | 3166 | """
A Longest common subsequence (LCS) problem solver.
This problem is a good example of dynamic programming, and also has its
significance in biological applications.
For more information about LCS, please see:
http://en.wikipedia.org/wiki/Longest_common_subsequence_problem
Copyright 2009 Shao-Chuan Wang <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = "Shao-Chuan Wang"
__email__ = "[email protected]"
__version__ = "1.0"
__URL__ = "http://shao-chuan.appspot.com"
import functools
def cached(func):
cache = {}
def template(*args): #: template is wrapper; func is wrapped
key = (func, )+args
try:
ret = cache[key]
except KeyError:
ret = func(*args)
cache[key] = ret
else:
pass
return ret
functools.update_wrapper(template, func)
return template
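# Usage sketch of the @cached decorator above (added for illustration;
# `fib` is a hypothetical function, not part of this recipe):
#
# @cached
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# fib(80)  # each distinct argument is computed once; repeat calls hit the cache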
@cached
def LCSLength(str1, str2):
if len(str1)==0 or len(str2)==0:
return 0
if str1[-1] == str2[-1]:
return LCSLength(str1[:-1], str2[:-1])+1
else:
return max(LCSLength(str1, str2[:-1]), LCSLength(str1[:-1], str2))
@cached
def LCS(str1, str2):
if len(str1)==0 or len(str2)==0:
return ''
if str1[-1] == str2[-1]:
return ''.join([LCS(str1[:-1], str2[:-1]), str1[-1]])
else:
candidate1 = LCS(str1[:-1], str2)
candidate2 = LCS(str1, str2[:-1])
if len(candidate1) >= len(candidate2):
return candidate1
else:
return candidate2
if __name__=='__main__':
# a simple example
lcs = LCS('abcbdab', 'bdcaba')
assert len(lcs) == LCSLength('abcbdab', 'bdcaba')
print 'Length of Longest common subsequence: %d' %(len(lcs),)
print 'Longest common subsequence: %s' % (lcs,)
# a complex example:
strA = '''abcdefgabcdefgaabcdefgabcdefgabcdesdqfgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefg'''
strB = '''gdebcdehhglkjlkabvhgdebcdehhgdebcdehhgdebcdeoshhgdebcdehhgdebcdehhgdebcdehhgdebcdehh'''
lcs = LCS(strA, strB)
assert len(lcs) == LCSLength(strA, strB)
print 'Length of Longest common subsequence: %d' %(len(lcs),)
print 'Longest common subsequence: '
print lcs
| mit |
physycom/QGIS | python/plugins/processing/algs/qgis/IdwInterpolation.py | 9 | 7135 | # -*- coding: utf-8 -*-
"""
***************************************************************************
IdwInterpolation.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRectangle,
QgsProcessingUtils,
QgsProcessingParameterNumber,
QgsProcessingParameterExtent,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterDestination,
QgsProcessingException)
from qgis.analysis import (QgsInterpolator,
QgsIDWInterpolator,
QgsGridFileWriter)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.algs.qgis.ui.InterpolationWidgets import ParameterInterpolationData, ParameterPixelSize
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class IdwInterpolation(QgisAlgorithm):
INTERPOLATION_DATA = 'INTERPOLATION_DATA'
DISTANCE_COEFFICIENT = 'DISTANCE_COEFFICIENT'
PIXEL_SIZE = 'PIXEL_SIZE'
COLUMNS = 'COLUMNS'
ROWS = 'ROWS'
EXTENT = 'EXTENT'
OUTPUT = 'OUTPUT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'interpolation.png'))
def group(self):
return self.tr('Interpolation')
def groupId(self):
return 'interpolation'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(ParameterInterpolationData(self.INTERPOLATION_DATA,
self.tr('Input layer(s)')))
self.addParameter(QgsProcessingParameterNumber(self.DISTANCE_COEFFICIENT,
self.tr('Distance coefficient P'), type=QgsProcessingParameterNumber.Double,
minValue=0.0, maxValue=99.99, defaultValue=2.0))
self.addParameter(QgsProcessingParameterExtent(self.EXTENT,
self.tr('Extent'),
optional=False))
pixel_size_param = ParameterPixelSize(self.PIXEL_SIZE,
self.tr('Output raster size'),
layersData=self.INTERPOLATION_DATA,
extent=self.EXTENT,
minValue=0.0,
default=0.1)
self.addParameter(pixel_size_param)
cols_param = QgsProcessingParameterNumber(self.COLUMNS,
self.tr('Number of columns'),
optional=True,
minValue=0, maxValue=10000000)
cols_param.setFlags(cols_param.flags() | QgsProcessingParameterDefinition.FlagHidden)
self.addParameter(cols_param)
rows_param = QgsProcessingParameterNumber(self.ROWS,
self.tr('Number of rows'),
optional=True,
minValue=0, maxValue=10000000)
rows_param.setFlags(rows_param.flags() | QgsProcessingParameterDefinition.FlagHidden)
self.addParameter(rows_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated')))
def name(self):
return 'idwinterpolation'
def displayName(self):
return self.tr('IDW interpolation')
def processAlgorithm(self, parameters, context, feedback):
interpolationData = ParameterInterpolationData.parseValue(parameters[self.INTERPOLATION_DATA])
coefficient = self.parameterAsDouble(parameters, self.DISTANCE_COEFFICIENT, context)
bbox = self.parameterAsExtent(parameters, self.EXTENT, context)
pixel_size = self.parameterAsDouble(parameters, self.PIXEL_SIZE, context)
output = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
columns = self.parameterAsInt(parameters, self.COLUMNS, context)
rows = self.parameterAsInt(parameters, self.ROWS, context)
if columns == 0:
columns = max(round(bbox.width() / pixel_size) + 1, 1)
if rows == 0:
rows = max(round(bbox.height() / pixel_size) + 1, 1)
if interpolationData is None:
raise QgsProcessingException(
self.tr('You need to specify at least one input layer.'))
layerData = []
layers = []
for i, row in enumerate(interpolationData.split('::|::')):
v = row.split('::~::')
data = QgsInterpolator.LayerData()
# need to keep a reference until interpolation is complete
layer = QgsProcessingUtils.variantToSource(v[0], context)
data.source = layer
data.transformContext = context.transformContext()
layers.append(layer)
data.valueSource = int(v[1])
data.interpolationAttribute = int(v[2])
if data.valueSource == QgsInterpolator.ValueAttribute and data.interpolationAttribute == -1:
raise QgsProcessingException(self.tr('Layer {} is set to use a value attribute, but no attribute was set'.format(i + 1)))
if v[3] == '0':
data.sourceType = QgsInterpolator.SourcePoints
elif v[3] == '1':
data.sourceType = QgsInterpolator.SourceStructureLines
else:
data.sourceType = QgsInterpolator.SourceBreakLines
layerData.append(data)
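# Illustrative note (added): based on the parsing above, each row of the
# INTERPOLATION_DATA string has the shape
# 'source::~::valueSource::~::attribute::~::sourceType', with rows joined
# by '::|::'; e.g. (hypothetical values):
# 'points.shp::~::1::~::2::~::0::|::lines.shp::~::0::~::-1::~::1'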
interpolator = QgsIDWInterpolator(layerData)
interpolator.setDistanceCoefficient(coefficient)
writer = QgsGridFileWriter(interpolator,
output,
bbox,
columns,
rows)
writer.writeFile(feedback)
return {self.OUTPUT: output}
| gpl-2.0 |
StamKaly/altitude-mod-foundation | alti_discord/bot.py | 1 | 8731 | import discord
import asyncio
import aiohttp
import async_timeout
import json
class DiscordMusic(discord.Client):
def __init__(self, number, commands_queue, output_queue, youtube_key, server_id, voice_channel_id, text_channel_id):
discord.Client.__init__(self)
self.number = number
self.commands_queue = commands_queue
self.output_queue = output_queue
discord.opus.load_opus('/usr/lib/x86_64-linux-gnu/libopus.so.0')
self.youtube_key = youtube_key
self.server = server_id
self.voice_channel = voice_channel_id
self.text_channel = text_channel_id
self.players = []
self.current_player = None
self.new_requests = []
self.alti_player = None
self.current_volume = 1.00
async def bot_play_from_players(self):
if self.players and self.is_voice_connected(self.server):
if self.players and not self.current_player:
self.current_player = self.players[0]
self.players.remove(self.players[0])
self.current_player.volume = self.current_volume
self.current_player.start()
if self.current_player.is_done():
self.current_player = None
async def on_ready(self):
self.voice_channel = self.get_channel(self.voice_channel)
await self.output_queue.put(self.voice_channel.voice_members)
self.text_channel = self.get_channel(self.text_channel)
self.server = self.get_server(self.server)
await self.send_message(self.text_channel, "Started")
while True:
if not self.commands_queue.empty():
command = await self.commands_queue.get()
if command[0] == "join":
await self.bot_join_voice_channel(command[1], command[2])
elif command[0] == "leave":
await self.bot_leave_voice_channel()
elif command[0] == "play":
url = await self.youtube_search(command[1])
if url:
await self.bot_add_music_url(url, command[2].nickname)
else:
await self.output_queue.put(['whisper', command[2],
'No video was found with your search terms!'])
elif command[0] == "pause":
await self.bot_pause(command[1])
elif command[0] == "resume":
await self.bot_resume(command[1])
elif command[0] == "volume":
await self.bot_change_volume(command[1], command[2])
elif command[0] == "skip":
await self.bot_skip(command[1])
elif command[0] == "request":
self.new_requests.append(command[1])
await self.bot_play_from_players()
await asyncio.sleep(0.1)
async def on_message(self, message):
if all((self.new_requests, message.channel == self.text_channel, len(message.content) == 4,
message.author.id != self.user.id, message.content.isdigit())):
found = False
for request in self.new_requests:
if message.content == str(request[0]):
await self.output_queue.put(['user', [request[1], message.author.id]])
self.new_requests.remove(request)
await self.send_message(message.channel,
"{}, you successfully registered!".format(message.author.mention))
found = True
break
if not found:
await self.send_message(message.channel,
"{}, sorry but your secret number was not found.".format(message.author.mention)
)
async def on_voice_state_update(self, before, after):
if self.alti_player and self.alti_player[0] not in [member.id for member in self.voice_channel.voice_members]:
await self.output_queue.put(['safe_leave', self.alti_player[1], self.number])
async def bot_join_voice_channel(self, discord_id, vapor_id):
if not self.is_voice_connected(self.server):
self.alti_player = (discord_id, vapor_id)
await self.join_voice_channel(self.voice_channel)
else:
print("Warning: Requested to join when already connected!")
async def bot_leave_voice_channel(self):
if self.is_voice_connected(self.server):
self.players = []
self.current_player = None
self.alti_player = None
self.current_volume = 1.00
await self.voice_client_in(self.server).disconnect()
else:
print("Warning: Requested to leave when not connected!")
async def youtube_search(self, search_terms):
search_terms = ','.join(search_terms.split())
url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&type=video&key={}'.format(search_terms,
self.youtube_key
)
async with aiohttp.ClientSession(loop=self.loop) as session:
with async_timeout.timeout(10):
async with session.get(url) as response:
try:
json_response = await response.json()
if not json_response['pageInfo']['totalResults'] == 0:
return 'http://youtube.com/watch?v={}'.format(json_response['items'][0]['id']['videoId'])
except json.decoder.JSONDecodeError:
return None
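# Illustrative note (added): for search terms 'test song' the method
# requests a URL of the form (YOUR_KEY is a placeholder):
# https://www.googleapis.com/youtube/v3/search?part=snippet&q=test,song&type=video&key=YOUR_KEY
# and, on success, returns 'http://youtube.com/watch?v=<videoId>'.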
async def bot_add_music_url(self, url, nickname):
if self.is_voice_connected(self.server):
player = await self.voice_client_in(self.server).create_ytdl_player(url)
self.players.append(player)
try:
if player.title:
string = "{} enqueued {}.".format(nickname, player.title)
string.encode('ascii')
await self.output_queue.put([self.number, string])
except UnicodeEncodeError:
pass
async def bot_pause(self, alti_player):
if self.current_player and self.current_player.is_playing():
self.current_player.pause()
await self.output_queue.put([self.number, "Bot {} has paused the stream.".format(self.number)])
else:
await self.output_queue.put(['whisper', alti_player, 'Nothing is being streamed in order to pause.'])
async def bot_resume(self, alti_player):
if self.current_player and not self.current_player.is_playing():
self.current_player.resume()
await self.output_queue.put([self.number, "Bot {} has resumed the stream.".format(self.number)])
else:
await self.output_queue.put(['whisper', alti_player, 'Nothing has been paused in order to resume.'])
async def bot_change_volume(self, volume, alti_player):
if self.current_player:
current_volume = int(self.current_player.volume * 100)
self.current_volume = volume
self.current_player.volume = volume
await self.output_queue.put([self.number,
"Bot {} has changed the stream's volume\nfrom {}% to {}%.".format(
self.number, current_volume, int(volume * 100))])
else:
await self.output_queue.put(['whisper', alti_player,
'Nothing is being streamed in order to change volume.'])
async def bot_skip(self, alti_player):
if self.current_player and self.current_player.is_playing():
self.current_player.stop()
self.current_player = None
await self.output_queue.put([self.number,
"Bot {} has stopped streaming current playing song!".format(self.number)])
else:
await self.output_queue.put(['whisper', alti_player, 'No song is being played in order to skip it.'])
def bot(loop, login, number, commands_queue, output_queue, youtube_key, server_id, voice_channel_id, text_channel_id):
asyncio.set_event_loop(loop)
login = login if len(login) == 2 else [login]
DiscordMusic(number, commands_queue, output_queue, youtube_key, server_id, voice_channel_id, text_channel_id)\
.run(*login)
| mit |
qrsforever/workspace | ML/learn/TensorFlow_for_Machine_Intelligence/ch03.py | 1 | 11044 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file ch3.2.1.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-05-23 23:41:40
############################# jupyter-vim map ######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# ,ji: import file
# ,je: run cell
# ,jj: run current line
# ,jc: clear buffer; ju: update buffer; jq: quit buffer
# ,jh: open buffer horizontally; jv: open buffer virtually
# ,jf: !eog /tmp/jupyter_vim.png
# ,jp: print <cword> variable; ,jt: print <cword> type
# ,j1: output <cword> head(1), j2, j3, j4, j5 is also
# ,j6: output <cword> tail(1), j7, j8, j9, j0 is also
#####################################################################################
import tensorflow as tf
import numpy as np
#####################################################################################
# <codecell> 3.2.1: data flow
#####################################################################################
a = tf.constant(5, name='input_a')
b = tf.constant(3, name='input_b')
c = tf.multiply(a, b, name='mul_c')
d = tf.add(a, b, name='add_d')
e = tf.add(c, d, name='add_e')
sess = tf.Session()
sess.run(e)
writer = tf.summary.FileWriter('/tmp/tf', sess.graph)
writer.close()
sess.close()
#####################################################################################
# <codecell> 3.2.2: tensors
#####################################################################################
a = tf.constant([5,3], name='input_a')
b = tf.reduce_prod(a, name='prod_b')
c = tf.reduce_sum(a, name='sum_c')
d = tf.add(b, c, name='add_d')
sess = tf.Session()
sess.run(d)
writer = tf.summary.FileWriter('/tmp/tf', sess.graph)
writer.close()
sess.close()
#####################################################################################
# <codecell> 3.2.5: Graph
#####################################################################################
g1 = tf.get_default_graph()
g2 = tf.Graph()
with g1.as_default():
sess = tf.Session(graph=g1)
a = tf.constant([1, 2], name='input_a')
b = tf.constant([2, 4], name='input_b')
c = a + b
sess.run(c)
writer = tf.summary.FileWriter('/tmp/tf', sess.graph)
writer.close()
sess.close()
with g2.as_default():
sess = tf.Session(graph=g2)
e = tf.constant([1, 2], name='input_e')
f = tf.constant([2, 4], name='input_f')
g = e + f
sess.run(g)
writer = tf.summary.FileWriter('/tmp/tf', sess.graph)
writer.close()
sess.close()
#####################################################################################
# <codecell> 3.2.7: placeholder nodes
#####################################################################################
a = tf.placeholder(dtype=tf.int32, shape=(2), name='input_a')
b = tf.reduce_prod(a, name='prod_b')
c = tf.reduce_sum(a, name='sum_c')
d = tf.add(b, c, name='add_d')
sess = tf.Session()
input_dict = {a: np.array([5,3], dtype=np.int32)}
print(sess.run(fetches=d, feed_dict=input_dict))
#####################################################################################
# <codecell> variables (tf.Variable)
#####################################################################################
a = tf.Variable(3, trainable=True, name='var_a')
print(type(a))
# output: <class 'tensorflow.python.ops.variables.RefVariable'>
b = tf.constant(3, dtype=tf.int32, shape=[1], name='var_b')
print(type(b))
# output: <class 'tensorflow.python.framework.ops.Tensor'>
add = tf.add(a, 10, name='add')
print(type(add))
# output: <class 'tensorflow.python.framework.ops.Tensor'>
mul = tf.multiply(b, 20, name='mul')
zeros = tf.zeros(shape=[2, 2], dtype=tf.int32, name='zeros')
print(type(zeros))
# output: <class 'tensorflow.python.framework.ops.Tensor'>
ones = tf.ones(shape=[6], dtype=tf.float32)
init = tf.global_variables_initializer()
# output: <class 'tensorflow.python.framework.ops.Operation'>
print(type(init))
sess = tf.Session()
print(sess.run(init))
# output: None; it only initializes variables, there is no op result
##
var1 = tf.Variable(1, name='var1')
var2 = tf.Variable(2, name='var2')
init1 = tf.variables_initializer([var1], name='init_var1')
sess = tf.Session()
sess.run(init1)
##
my_var = tf.Variable(1)
my_var_times_two = my_var.assign(my_var * 2)
# output: <class 'tensorflow.python.framework.ops.Tensor'>
init = tf.global_variables_initializer()
sess = tf.Session()
print(sess.run(init))
print(sess.run(my_var_times_two)) # 2
print(sess.run(my_var_times_two)) # 4
print(sess.run(my_var_times_two)) # 8
##
my_var1 = tf.Variable(1)
my_var2 = tf.Variable(2)
my_var_times_two = my_var2.assign(my_var1 * 2)
init = tf.global_variables_initializer()
sess = tf.Session()
print(sess.run(init))
print(sess.run(my_var_times_two)) # 2
print(sess.run(my_var_times_two)) # 2
print(sess.run(my_var_times_two)) # 2
##
sess.run(my_var1.assign_add(1)) # 2
sess.run(my_var1.assign_add(1)) # 3
sess.run(my_var1.assign_sub(1)) # 2
#####################################################################################
# <codecell> name scope
#####################################################################################
graph = tf.Graph()
with graph.as_default():
ph_input1 = tf.placeholder(dtype=tf.float32, shape=[], name='ph_input1')
ph_input2 = tf.placeholder(dtype=tf.float32, shape=[], name='ph_input2')
# the constant Tensor shows up in every name scope that uses it
k_input3 = tf.constant(3, dtype=tf.float32, name='static_value')
with tf.name_scope('A_scope'):
A_mul = tf.math.multiply(ph_input1, k_input3)
A_out = tf.math.subtract(A_mul, ph_input1)
with tf.name_scope('B_scope'):
B_mul = tf.multiply(ph_input2, k_input3)
B_out = tf.math.subtract(B_mul, ph_input2)
with tf.name_scope('C_scope'):
C_div = tf.math.divide(A_out, B_out)
C_out = tf.math.add(C_div, k_input3)
with tf.name_scope('D_scope'):
D_div = tf.math.divide(B_out, A_out)
D_out = tf.math.add(D_div, k_input3)
out = tf.maximum(C_out, D_out)
writer = tf.summary.FileWriter('/tmp/tf', graph=graph)
writer.close()
#####################################################################################
# <codecell> example -- [None]: vector of arbitrary length, []: scalar
#####################################################################################
# scope: transformation
# +--------------------------------------------------------------------+
# | b |
# | a ***** |
# | +-------+ [None] ** ** [] |
# | | | ----------> * prod * ------\ d |
# | | | ** ** \ ******* |
# | | | ***** \ ** ** | []
# ----->| input | -->* add * ---------->
# | | | c -->** ** | |
# | | | ***** / ******* | |
# | | | [None] ** ** [] / | |
# | | | ----------> * sum * ------/ | |
# | +-------+ ** ** | |
# | ***** | |
# | layer: input layer: intermediate layer: output | |
# +--------------------------------------------------------------------+ |
# |
# |
# +------------------+
# |
# scope: summaries scope: update v
# +--------------------------+ +------------------------+
# | | | update_total |
# | | +---- | update_steps |------+
# | | | +------------------------+ |
# | output_summary <---------------| |
# | | |[] |[]
# | | | scope: variables |
# | total_summary <---------------| +------------------------+ |
# | | | | | |
# | | | | | |
# | avg_summary <---------------| | output_total <-----------|
# | | | | |
# +--------------------------+ | global_steps <-----------|
# | |
# +------------------------+
graph = tf.Graph()
with graph.as_default():
with tf.name_scope('variables'):
output_total = tf.Variable(0.0, trainable=False,
dtype=tf.float32, name='output_total')
global_step = tf.Variable(0, trainable=False,
dtype=tf.int32, name='global_step')
with tf.name_scope('transformation'):
with tf.name_scope('input'):
a = tf.placeholder(dtype=tf.float32, shape=[None], name='input_a')
with tf.name_scope('intermediate'):
b = tf.reduce_prod(a, name='prod_b')
c = tf.reduce_sum(a, name='sum_c')
with tf.name_scope('output'):
d = tf.add(b, c, name='output_d')
with tf.name_scope('update'):
update_total = output_total.assign_add(d)
update_step = global_step.assign_add(1)
with tf.name_scope('summaries'):
avg = tf.math.divide(update_total,
tf.cast(update_step, tf.float32), name='avg')
tf.summary.scalar(name='output_summary', tensor=d)
tf.summary.scalar(name='total_summary', tensor=update_total)
tf.summary.scalar(name='avg_summary', tensor=avg)
with tf.name_scope('global_ops'):
init = tf.global_variables_initializer()
merged_summaries = tf.summary.merge_all()
sess = tf.Session(graph=graph)
writer = tf.summary.FileWriter('/tmp/tf', graph)
sess.run(init)
def run_graph(input_tensor):
feed_dict = {a: input_tensor}
output, step, summaries = sess.run(
fetches=[d, update_step, merged_summaries], feed_dict=feed_dict)
print(step)
print(output)  # fetched value of d; printing the Tensor object would only show metadata
print(sess.run(output_total))  # current accumulated total
writer.add_summary(summary=summaries, global_step=step)
run_graph([2, 8])
run_graph([3, 1, 3, 3])
run_graph([8])
run_graph([1,2,4])
run_graph([11,4])
| mit |
yasoob/youtube-dl-GUI | youtube_dl/extractor/tfo.py | 11 | 2038 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
HEADRequest,
ExtractorError,
int_or_none,
clean_html,
)
class TFOIE(InfoExtractor):
_GEO_COUNTRIES = ['CA']
_VALID_URL = r'https?://(?:www\.)?tfo\.org/(?:en|fr)/(?:[^/]+/){2}(?P<id>\d+)'
_TEST = {
'url': 'http://www.tfo.org/en/universe/tfo-247/100463871/video-game-hackathon',
'md5': 'cafbe4f47a8dae0ca0159937878100d6',
'info_dict': {
'id': '7da3d50e495c406b8fc0b997659cc075',
'ext': 'mp4',
'title': 'Video Game Hackathon',
'description': 'md5:558afeba217c6c8d96c60e5421795c07',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
self._request_webpage(HEADRequest('http://www.tfo.org/'), video_id)
infos = self._download_json(
'http://www.tfo.org/api/web/video/get_infos', video_id, data=json.dumps({
'product_id': video_id,
}).encode(), headers={
'X-tfo-session': self._get_cookies('http://www.tfo.org/')['tfo-session'].value,
})
if infos.get('success') == 0:
if infos.get('code') == 'ErrGeoBlocked':
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(infos['msg'])), expected=True)
video_data = infos['data']
return {
'_type': 'url_transparent',
'id': video_id,
'url': 'limelight:media:' + video_data['llid'],
'title': video_data['title'],
'description': video_data.get('description'),
'series': video_data.get('collection'),
'season_number': int_or_none(video_data.get('season')),
'episode_number': int_or_none(video_data.get('episode')),
'duration': int_or_none(video_data.get('duration')),
'ie_key': 'LimelightMedia',
}
| mit |
dwagon/explorertools | explorer/kstat.py | 1 | 4834 | #!/usr/local/bin/python
#
# Script to understand kstat output
#
# Written by Dougal Scott <[email protected]>
# $Id: kstat.py 2393 2012-06-01 06:38:17Z dougals $
# $HeadURL: http://svn/ops/unix/explorer/trunk/explorer/kstat.py $
import os
import sys
import getopt
import re
import explorerbase
##########################################################################
# Chain ##################################################################
##########################################################################
class Chain(explorerbase.ExplorerBase):
""" All the kstat information for a single module:name pair
"""
##########################################################################
def __init__(self, config, module, name, instance):
explorerbase.ExplorerBase.__init__(self, config)
self.module = module
self.objname = name
self.instance = instance
self.class_ = None
self.snaptime = None
##########################################################################
def addVal(self, stat, val):
if stat == 'class':
self.class_ = val
elif stat == 'snaptime':
self.snaptime = val
else:
self[stat] = val
##########################################################################
def __repr__(self):
return "<Chain: module=%s name=%s instance=%s class_=%s>" % (self.module, self.objname, self.instance, self.class_)
##########################################################################
def printLink(self):
str = ""
for k in sorted(self.data.keys()):
str += "%s=%s\n" % (k, self[k])
return str
##########################################################################
# Kstat ##################################################################
##########################################################################
class Kstat(explorerbase.ExplorerBase):
"""Understand explorer output with respect to kstat
"""
##########################################################################
def __init__(self, config):
explorerbase.ExplorerBase.__init__(self, config)
self.parseKstat()
##########################################################################
def parseKstat(self):
if not self.exists('netinfo/kstat-p.out'):
return
f = self.open('netinfo/kstat-p.out')
for line in f:
line = line.strip()
try: # Start of new instances don't have data
key, val = line.split('\t')
except ValueError:
continue
bits = key.split(':')
module = bits[0]
instance = bits[1]
statistic = bits[-1]
name = ":".join(bits[2:-1]) # Sigh, so much for a clean concept
if module not in self:
self[module] = {}
if name not in self[module]:
self[module][name] = {}
if instance not in self[module][name]:
self[module][name][instance] = Chain(
self.config, module, name, instance)
self[module][name][instance].addVal(statistic, val)
f.close()
##########################################################################
def moduleList(self):
return sorted(self.keys())
##########################################################################
def nameList(self, module=None):
names = []
if module is None:
modlist = self.moduleList()
else:
modlist = [module]
for mod in modlist:
names.extend(self[mod].keys())
return sorted(names)
##########################################################################
def instanceList(self, module, name):
""" Return the instances that belong to the
"""
return self[module][name].keys()
##########################################################################
def classChains(self, class_, module=None):
""" Return all the chains that belong to the specified class_
module can legitimately be '' - so we cannot simply test it for falsiness
"""
if module is None:
modlist = self.moduleList()
else:
modlist = [module]
classlist = []
for m in modlist:
for n in self.nameList(m):
for i in self[m][n]:
if self[m][n][i].class_ == class_:
classlist.append(self[m][n][i])
return classlist
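##########################################################################
# Usage sketch (added for illustration; `config` and the 'net' class
# below are hypothetical):
#
#   k = Kstat(config)
#   for chain in k.classChains('net'):
#       print(chain.printLink())
##########################################################################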
##########################################################################
def chain(self, module, name, instance):
c = self[module][name][instance]
return c
# EOF
| gpl-2.0 |
aviweit/libcloud | libcloud/utils/iso8601.py | 58 | 4302 | """
Copyright (c) 2007 Michael Twomey
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
>>>
"""
# Taken from pyiso8601 which is licensed under the MIT license.
from datetime import datetime, timedelta, tzinfo
import re
__all__ = ["parse_date", "ParseError"]
# Adapted from http://delete.me.uk/2005/03/iso8601.html
ISO8601_REGEX = re.compile(
r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?" # NOQA
r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?")
TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})") # NOQA
class ParseError(Exception):
"""Raised when there is a problem parsing a date string"""
# Yoinked from python docs
ZERO = timedelta(0)
class Utc(tzinfo):
"""UTC
"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
UTC = Utc()
class FixedOffset(tzinfo):
"""Fixed offset in hours and minutes from UTC
"""
def __init__(self, offset_hours, offset_minutes, name):
self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
def __repr__(self):
return "<FixedOffset %r>" % self.__name
def parse_timezone(tzstring, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if tzstring == "Z":
return default_timezone
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if tzstring is None:
return default_timezone
m = TIMEZONE_REGEX.match(tzstring)
prefix, hours, minutes = m.groups()
hours, minutes = int(hours), int(minutes)
if prefix == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, tzstring)
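# Examples (added for illustration; behavior follows the code above):
#   parse_timezone("Z")      -> UTC
#   parse_timezone(None)     -> UTC (the documented default)
#   parse_timezone("+05:30") -> FixedOffset(5, 30, "+05:30")
#   parse_timezone("-08:00") -> FixedOffset(-8, 0, "-08:00")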
def parse_date(datestring, default_timezone=UTC):
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
"""
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
if groups["fraction"] is None:
groups["fraction"] = 0
else:
groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
return datetime(int(groups["year"]), int(groups["month"]),
int(groups["day"]), int(groups["hour"]),
int(groups["minute"]), int(groups["second"]),
int(groups["fraction"]), tz)
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pip/commands/completion.py | 435 | 1991 | from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command to be used for command completion'
hidden = True
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
cmd_opts.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
)
| mit |
jabesq/home-assistant | tests/components/python_script/test_init.py | 22 | 9516 | """Test the python_script component."""
import asyncio
import logging
from unittest.mock import patch, mock_open
from homeassistant.setup import async_setup_component
from homeassistant.components.python_script import execute
@asyncio.coroutine
def test_setup(hass):
"""Test we can discover scripts."""
scripts = [
'/some/config/dir/python_scripts/hello.py',
'/some/config/dir/python_scripts/world_beer.py'
]
with patch('homeassistant.components.python_script.os.path.isdir',
return_value=True), \
patch('homeassistant.components.python_script.glob.iglob',
return_value=scripts):
res = yield from async_setup_component(hass, 'python_script', {})
assert res
assert hass.services.has_service('python_script', 'hello')
assert hass.services.has_service('python_script', 'world_beer')
with patch('homeassistant.components.python_script.open',
mock_open(read_data='fake source'), create=True), \
patch('homeassistant.components.python_script.execute') as mock_ex:
yield from hass.services.async_call(
'python_script', 'hello', {'some': 'data'}, blocking=True)
assert len(mock_ex.mock_calls) == 1
hass, script, source, data = mock_ex.mock_calls[0][1]
assert hass is hass
assert script == 'hello.py'
assert source == 'fake source'
assert data == {'some': 'data'}
@asyncio.coroutine
def test_setup_fails_on_no_dir(hass, caplog):
"""Test we fail setup when no dir found."""
with patch('homeassistant.components.python_script.os.path.isdir',
return_value=False):
res = yield from async_setup_component(hass, 'python_script', {})
assert not res
assert 'Folder python_scripts not found in configuration folder' in \
caplog.text
@asyncio.coroutine
def test_execute_with_data(hass, caplog):
"""Test executing a script."""
caplog.set_level(logging.WARNING)
source = """
hass.states.set('test.entity', data.get('name', 'not set'))
"""
hass.async_add_job(execute, hass, 'test.py', source, {'name': 'paulus'})
yield from hass.async_block_till_done()
assert hass.states.is_state('test.entity', 'paulus')
# No errors logged = good
assert caplog.text == ''
@asyncio.coroutine
def test_execute_warns_print(hass, caplog):
"""Test print triggers warning."""
caplog.set_level(logging.WARNING)
source = """
print("This triggers warning.")
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Don't use print() inside scripts." in caplog.text
@asyncio.coroutine
def test_execute_logging(hass, caplog):
"""Test logging works."""
caplog.set_level(logging.INFO)
source = """
logger.info('Logging from inside script')
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Logging from inside script" in caplog.text
@asyncio.coroutine
def test_execute_compile_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
this is not valid Python
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Error loading script test.py" in caplog.text
@asyncio.coroutine
def test_execute_runtime_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
raise Exception('boom')
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Error executing script: boom" in caplog.text
@asyncio.coroutine
def test_accessing_async_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
hass.async_stop()
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Not allowed to access async methods" in caplog.text
@asyncio.coroutine
def test_using_complex_structures(hass, caplog):
"""Test that dicts and lists work."""
caplog.set_level(logging.INFO)
source = """
mydict = {"a": 1, "b": 2}
mylist = [1, 2, 3, 4]
logger.info('Logging from inside script: %s %s' % (mydict["a"], mylist[2]))
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Logging from inside script: 1 3" in caplog.text
@asyncio.coroutine
def test_accessing_forbidden_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
for source, name in {
'hass.stop()': 'HomeAssistant.stop',
'dt_util.set_default_time_zone()': 'module.set_default_time_zone',
'datetime.non_existing': 'module.non_existing',
'time.tzset()': 'TimeWrapper.tzset',
}.items():
caplog.records.clear()
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert "Not allowed to access {}".format(name) in caplog.text
@asyncio.coroutine
def test_iterating(hass):
"""Test compile error logs error."""
source = """
for i in [1, 2]:
hass.states.set('hello.{}'.format(i), 'world')
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert hass.states.is_state('hello.1', 'world')
assert hass.states.is_state('hello.2', 'world')
@asyncio.coroutine
def test_unpacking_sequence(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
a,b = (1,2)
ab_list = [(a,b) for a,b in [(1, 2), (3, 4)]]
hass.states.set('hello.a', a)
hass.states.set('hello.b', b)
hass.states.set('hello.ab_list', '{}'.format(ab_list))
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert hass.states.is_state('hello.a', '1')
assert hass.states.is_state('hello.b', '2')
assert hass.states.is_state('hello.ab_list', '[(1, 2), (3, 4)]')
# No errors logged = good
assert caplog.text == ''
@asyncio.coroutine
def test_execute_sorted(hass, caplog):
"""Test sorted() function."""
caplog.set_level(logging.ERROR)
source = """
a = sorted([3,1,2])
assert(a == [1,2,3])
hass.states.set('hello.a', a[0])
hass.states.set('hello.b', a[1])
hass.states.set('hello.c', a[2])
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert hass.states.is_state('hello.a', '1')
assert hass.states.is_state('hello.b', '2')
assert hass.states.is_state('hello.c', '3')
# No errors logged = good
assert caplog.text == ''
@asyncio.coroutine
def test_exposed_modules(hass, caplog):
"""Test datetime and time modules exposed."""
caplog.set_level(logging.ERROR)
source = """
hass.states.set('module.time', time.strftime('%Y', time.gmtime(521276400)))
hass.states.set('module.time_strptime',
time.strftime('%H:%M', time.strptime('12:34', '%H:%M')))
hass.states.set('module.datetime',
datetime.timedelta(minutes=1).total_seconds())
"""
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert hass.states.is_state('module.time', '1986')
assert hass.states.is_state('module.time_strptime', '12:34')
assert hass.states.is_state('module.datetime', '60.0')
# No errors logged = good
assert caplog.text == ''
@asyncio.coroutine
def test_reload(hass):
"""Test we can re-discover scripts."""
scripts = [
'/some/config/dir/python_scripts/hello.py',
'/some/config/dir/python_scripts/world_beer.py'
]
with patch('homeassistant.components.python_script.os.path.isdir',
return_value=True), \
patch('homeassistant.components.python_script.glob.iglob',
return_value=scripts):
res = yield from async_setup_component(hass, 'python_script', {})
assert res
assert hass.services.has_service('python_script', 'hello')
assert hass.services.has_service('python_script', 'world_beer')
assert hass.services.has_service('python_script', 'reload')
scripts = [
'/some/config/dir/python_scripts/hello2.py',
'/some/config/dir/python_scripts/world_beer.py'
]
with patch('homeassistant.components.python_script.os.path.isdir',
return_value=True), \
patch('homeassistant.components.python_script.glob.iglob',
return_value=scripts):
yield from hass.services.async_call(
'python_script', 'reload', {}, blocking=True)
assert not hass.services.has_service('python_script', 'hello')
assert hass.services.has_service('python_script', 'hello2')
assert hass.services.has_service('python_script', 'world_beer')
assert hass.services.has_service('python_script', 'reload')
@asyncio.coroutine
def test_sleep_warns_one(hass, caplog):
"""Test time.sleep warns once."""
caplog.set_level(logging.WARNING)
source = """
time.sleep(2)
time.sleep(5)
"""
with patch('homeassistant.components.python_script.time.sleep'):
hass.async_add_job(execute, hass, 'test.py', source, {})
yield from hass.async_block_till_done()
assert caplog.text.count('time.sleep') == 1
| apache-2.0 |
the5fire/onlinetodos | fabfile/__init__.py | 1 | 1594 | #coding:utf-8
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
from fabric.api import task, roles, cd
from fabric.state import env
from essay.tasks import build
from essay.tasks import deploy
from essay.tasks import virtualenv, supervisor, package, git
env.GIT_SERVER = 'https://github.com/' # for an SSH address only 'github.com' is needed
env.PROJECT = 'onlinetodos'
env.PROJECT_OWNER = 'the5fire'
env.DEFAULT_BRANCH = 'master'
######
# deploy settings:
env.PROCESS_COUNT = 2 # number of processes started on deploy
env.roledefs = {
'online': ['[email protected]'] # 打包服务器配置
}
env.VIRTUALENV_PREFIX = '/home/the5firetodo/'
env.SUPERVISOR_CONF_TEMPLATE = os.path.join(PROJECT_ROOT, 'conf', 'supervisord.conf')
PROJECT_NUM = 310
env.VENV_PORT_PREFIX_MAP = {
'a': '%d0' % PROJECT_NUM,
'b': '%d1' % PROJECT_NUM,
'c': '%d2' % PROJECT_NUM,
'd': '%d3' % PROJECT_NUM,
'e': '%d4' % PROJECT_NUM,
'f': '%d5' % PROJECT_NUM,
'g': '%d6' % PROJECT_NUM,
'h': '%d7' % PROJECT_NUM,
'i': '%d8' % PROJECT_NUM,
}
@task(default=True)
@roles('online')
def git_deploy(venv_dir, profile):
virtualenv.ensure(venv_dir)
with virtualenv.activate(venv_dir):
supervisor.ensure(project=env.PROJECT, profile=profile)
package.install_from_git(env.PROJECT)
supervisor.shutdown()
supervisor.start()
HOST_PATH = '/home/the5firetodo/a/src/onlinetodos/'
@task(default=True)
@roles('online')
def re_deploy(venv_dir, br="master"):
with cd(HOST_PATH):
git.checkout(br)
supervisor.reload(venv_dir)
| apache-2.0 |
aleksandaratanasov/catkin_pkg | src/catkin_pkg/package.py | 3 | 23688 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for parsing package.xml and providing an object
representation.
"""
from __future__ import print_function
from copy import deepcopy
import os
import re
import sys
import xml.dom.minidom as dom
PACKAGE_MANIFEST_FILENAME = 'package.xml'
class Package(object):
"""
Object representation of a package manifest file
"""
__slots__ = [
'package_format',
'name',
'version',
'version_abi',
'description',
'maintainers',
'licenses',
'urls',
'authors',
'build_depends',
'buildtool_depends',
'build_export_depends',
'buildtool_export_depends',
'exec_depends',
'test_depends',
'doc_depends',
'conflicts',
'replaces',
'exports',
'filename'
]
def __init__(self, filename=None, **kwargs):
"""
:param filename: location of package.xml. Necessary if
converting ``${prefix}`` in ``<export>`` values, ``str``.
"""
# initialize all slots ending with "s" with lists, all other with plain values
for attr in self.__slots__:
if attr.endswith('s'):
value = list(kwargs[attr]) if attr in kwargs else []
setattr(self, attr, value)
else:
value = kwargs[attr] if attr in kwargs else None
setattr(self, attr, value)
if 'depends' in kwargs:
for d in kwargs['depends']:
for slot in [self.build_depends, self.build_export_depends, self.exec_depends]:
if d not in slot:
slot.append(deepcopy(d))
del kwargs['depends']
if 'run_depends' in kwargs:
for d in kwargs['run_depends']:
for slot in [self.build_export_depends, self.exec_depends]:
if d not in slot:
slot.append(deepcopy(d))
del kwargs['run_depends']
self.filename = filename
# verify that no unknown keywords are passed
unknown = set(kwargs.keys()).difference(self.__slots__)
if unknown:
raise TypeError('Unknown properties: %s' % ', '.join(unknown))
def __getattr__(self, name):
if name == 'run_depends':
# merge different dependencies if they are not exactly equal
# potentially having the same dependency name multiple times with different attributes
run_depends = []
[run_depends.append(deepcopy(d)) for d in self.exec_depends + self.build_export_depends if d not in run_depends]
return run_depends
raise AttributeError(name)
def __getitem__(self, key):
if key in self.__slots__ + ['run_depends']:
return getattr(self, key)
raise KeyError('Unknown key "%s"' % key)
def __iter__(self):
for slot in self.__slots__:
yield slot
def __str__(self):
data = {}
for attr in self.__slots__:
data[attr] = getattr(self, attr)
return str(data)
def has_buildtool_depend_on_catkin(self):
"""
Returns True if this Package buildtool depends on catkin, otherwise False
:returns: True if the given package buildtool depends on catkin
:rtype: bool
"""
return 'catkin' in [d.name for d in self.buildtool_depends]
def has_invalid_metapackage_dependencies(self):
"""
Returns True if this package has invalid dependencies for a metapackage
This is defined by REP-0127 as any non-run_depends dependencies other than a buildtool_depend on catkin.
:returns: True if the given package has any invalid dependencies, otherwise False
:rtype: bool
"""
buildtool_depends = [d.name for d in self.buildtool_depends if d.name != 'catkin']
return len(self.build_depends + buildtool_depends + self.test_depends) > 0
def is_metapackage(self):
"""
Returns True if this package is a metapackage, otherwise False
:returns: True if metapackage, else False
:rtype: bool
"""
return 'metapackage' in [e.tagname for e in self.exports]
def validate(self, warnings=None):
"""
makes sure all standards for packages are met
:param package: Package to check
:param warnings: Print warnings if None or return them in the given list
:raises InvalidPackage: in case validation fails
"""
errors = []
new_warnings = []
if self.package_format:
if not re.match('^[1-9][0-9]*$', str(self.package_format)):
errors.append('The "format" attribute of the package must contain a positive integer if present')
if not self.name:
errors.append('Package name must not be empty')
# accepting upper case letters and hyphens only for backward compatibility
if not re.match('^[a-zA-Z0-9][a-zA-Z0-9_-]*$', self.name):
errors.append('Package name "%s" does not follow naming conventions' % self.name)
elif not re.match('^[a-z][a-z0-9_]*$', self.name):
new_warnings.append('Package name "%s" does not follow the naming conventions. It should start with a lower case letter and only contain lower case letters, digits and underscores.' % self.name)
if not self.version:
errors.append('Package version must not be empty')
elif not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+$', self.version):
errors.append('Package version "%s" does not follow version conventions' % self.version)
elif not re.match(r'^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)$', self.version):
new_warnings.append('Package "%s" does not follow the version conventions. It should not contain leading zeros (unless the number is 0).' % self.name)
if not self.description:
errors.append('Package description must not be empty')
if not self.maintainers:
errors.append('Package must declare at least one maintainer')
for maintainer in self.maintainers:
try:
maintainer.validate()
except InvalidPackage as e:
errors.append(str(e))
if not maintainer.email:
errors.append('Maintainers must have an email address')
if not self.licenses:
errors.append('The package node must contain at least one "license" tag')
if [l for l in self.licenses if not l.strip()]:
errors.append('The license tag must neither be empty nor only contain whitespaces')
if self.authors is not None:
for author in self.authors:
try:
author.validate()
except InvalidPackage as e:
errors.append(str(e))
dep_types = {
'build': self.build_depends,
'buildtool': self.buildtool_depends,
'build_export': self.build_export_depends,
'buildtool_export': self.buildtool_export_depends,
'exec': self.exec_depends,
'test': self.test_depends,
'doc': self.doc_depends
}
for dep_type, depends in dep_types.items():
for depend in depends:
if depend.name == self.name:
errors.append('The package must not "%s_depend" on a package with the same name as this package' % dep_type)
if self.is_metapackage():
if not self.has_buildtool_depend_on_catkin():
# TODO escalate to error in the future, or use metapackage.validate_metapackage
new_warnings.append('Metapackage "%s" must buildtool_depend on catkin.' % self.name)
if self.has_invalid_metapackage_dependencies():
new_warnings.append('Metapackage "%s" should not have other dependencies besides a '
'buildtool_depend on catkin and run_depends.' % self.name)
for warning in new_warnings:
if warnings is None:
print('WARNING: ' + warning, file=sys.stderr)
elif warning not in warnings:
warnings.append(warning)
if errors:
raise InvalidPackage('\n'.join(errors))
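# Illustrative usage sketch (editor addition, not part of catkin_pkg): collect
# validation warnings in a list instead of printing them; assumes `pkg` is a
# Package instance such as one returned by parse_package() below.
def _example_validate(pkg):
    collected = []
    try:
        pkg.validate(warnings=collected)
    except InvalidPackage as e:
        print('validation failed: %s' % e)
    for w in collected:
        print('warning: %s' % w)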
class Dependency(object):
__slots__ = ['name', 'version_lt', 'version_lte', 'version_eq', 'version_gte', 'version_gt']
def __init__(self, name, **kwargs):
for attr in self.__slots__:
value = kwargs[attr] if attr in kwargs else None
setattr(self, attr, value)
self.name = name
# verify that no unknown keywords are passed
unknown = set(kwargs.keys()).difference(self.__slots__)
if unknown:
raise TypeError('Unknown properties: %s' % ', '.join(unknown))
def __eq__(self, other):
if not isinstance(other, Dependency):
return False
return all([getattr(self, attr) == getattr(other, attr) for attr in self.__slots__])
def __str__(self):
return self.name
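# Illustrative sketch (editor addition): Dependency keeps optional version
# bounds in its slots and compares by value across all of them.
def _example_dependency():
    a = Dependency('roscpp', version_gte='1.11.0')
    b = Dependency('roscpp', version_gte='1.11.0')
    assert a == b                # equal: same name and same constraints
    assert str(a) == 'roscpp'    # __str__ yields only the name
    assert a.version_lt is None  # unset bounds default to None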
class Export(object):
__slots__ = ['tagname', 'attributes', 'content']
def __init__(self, tagname, content=None):
self.tagname = tagname
self.attributes = {}
self.content = content
def __str__(self):
txt = '<%s' % self.tagname
for key in sorted(self.attributes.keys()):
txt += ' %s="%s"' % (key, self.attributes[key])
if self.content:
txt += '>%s</%s>' % (self.content, self.tagname)
else:
txt += '/>'
return txt
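# Illustrative sketch (editor addition): Export serializes itself back to an
# XML-style tag, with or without text content.
def _example_export():
    assert str(Export('metapackage')) == '<metapackage/>'
    assert str(Export('build_type', 'cmake')) == '<build_type>cmake</build_type>'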
class Person(object):
__slots__ = ['name', 'email']
def __init__(self, name, email=None):
self.name = name
self.email = email
def __str__(self):
name = self.name
if not isinstance(name, str):
name = name.encode('utf-8')
if self.email is not None:
return '%s <%s>' % (name, self.email)
else:
return '%s' % name
def validate(self):
if self.email is None:
return
        if not re.match(r'^[-a-zA-Z0-9_%+]+(\.[-a-zA-Z0-9_%+]+)*@[-a-zA-Z0-9%]+(\.[-a-zA-Z0-9%]+)*\.[a-zA-Z]{2,}$', self.email):
raise InvalidPackage('Invalid email "%s" for person "%s"' % (self.email, self.name))
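# Illustrative sketch (editor addition): Person.validate() only checks the
# email address and accepts a missing one.
def _example_person():
    Person('Jane Doe').validate()                      # no email, nothing to check
    Person('Jane Doe', 'jane@example.com').validate()  # well-formed address passes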
class Url(object):
__slots__ = ['url', 'type']
def __init__(self, url, type_=None):
self.url = url
self.type = type_
def __str__(self):
return self.url
def parse_package_for_distutils(path=None):
print('WARNING: %s/setup.py: catkin_pkg.package.parse_package_for_distutils() is deprecated. Please use catkin_pkg.python_setup.generate_distutils_setup(**kwargs) instead.' % os.path.basename(os.path.abspath('.')))
from .python_setup import generate_distutils_setup
data = {}
if path is not None:
data['package_xml_path'] = path
return generate_distutils_setup(**data)
class InvalidPackage(Exception):
pass
def package_exists_at(path):
"""
Checks that a package exists at the given path
:param path: path to a package
:type path: str
:returns: True if package exists in given path, else False
:rtype: bool
"""
return os.path.isdir(path) and os.path.isfile(os.path.join(path, PACKAGE_MANIFEST_FILENAME))
def parse_package(path, warnings=None):
"""
Parse package manifest.
:param path: The path of the package.xml file, it may or may not
include the filename
:param warnings: Print warnings if None or return them in the given list
:returns: return :class:`Package` instance, populated with parsed fields
:raises: :exc:`InvalidPackage`
:raises: :exc:`IOError`
"""
if os.path.isfile(path):
filename = path
elif package_exists_at(path):
filename = os.path.join(path, PACKAGE_MANIFEST_FILENAME)
if not os.path.isfile(filename):
raise IOError('Directory "%s" does not contain a "%s"' % (path, PACKAGE_MANIFEST_FILENAME))
else:
raise IOError('Path "%s" is neither a directory containing a "%s" file nor a file' % (path, PACKAGE_MANIFEST_FILENAME))
with open(filename, 'r') as f:
try:
return parse_package_string(f.read(), filename, warnings=warnings)
except InvalidPackage as e:
            e.args = ['Invalid package manifest "%s": %s' % (filename, str(e))]
raise
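# Illustrative sketch (editor addition): parse_package() accepts either the
# package.xml file itself or the directory containing it; '/tmp/my_pkg' is a
# made-up path for demonstration.
def _example_parse_package():
    collected = []
    pkg = parse_package('/tmp/my_pkg', warnings=collected)
    print('%s %s (format %d)' % (pkg.name, pkg.version, pkg.package_format))
    return pkg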
def parse_package_string(data, filename=None, warnings=None):
"""
Parse package.xml string contents.
:param data: package.xml contents, ``str``
:param filename: full file path for debugging, ``str``
:param warnings: Print warnings if None or return them in the given list
:returns: return parsed :class:`Package`
:raises: :exc:`InvalidPackage`
"""
try:
root = dom.parseString(data)
except Exception as ex:
raise InvalidPackage('The manifest contains invalid XML:\n%s' % ex)
pkg = Package(filename)
# verify unique root node
nodes = _get_nodes(root, 'package')
if len(nodes) != 1:
raise InvalidPackage('The manifest must contain a single "package" root tag')
root = nodes[0]
# format attribute
value = _get_node_attr(root, 'format', default=1)
pkg.package_format = int(value)
assert pkg.package_format in [1, 2], "Unable to handle package.xml format version '%d', please update catkin_pkg (e.g. on Ubuntu/Debian use: sudo apt-get update && sudo apt-get install --only-upgrade python-catkin-pkg)" % pkg.package_format
# name
pkg.name = _get_node_value(_get_node(root, 'name'))
# version and optional abi
version_node = _get_node(root, 'version')
pkg.version = _get_node_value(version_node)
pkg.version_abi = _get_node_attr(version_node, 'abi', default=None)
# description
pkg.description = _get_node_value(_get_node(root, 'description'), allow_xml=True, apply_str=False)
# at least one maintainer, all must have email
maintainers = _get_nodes(root, 'maintainer')
for node in maintainers:
pkg.maintainers.append(Person(
_get_node_value(node, apply_str=False),
_get_node_attr(node, 'email')
))
# urls with optional type
urls = _get_nodes(root, 'url')
for node in urls:
pkg.urls.append(Url(
_get_node_value(node),
_get_node_attr(node, 'type', default='website')
))
# authors with optional email
authors = _get_nodes(root, 'author')
for node in authors:
pkg.authors.append(Person(
_get_node_value(node, apply_str=False),
_get_node_attr(node, 'email', default=None)
))
# at least one license
licenses = _get_nodes(root, 'license')
for node in licenses:
pkg.licenses.append(_get_node_value(node))
errors = []
# dependencies and relationships
pkg.build_depends = _get_dependencies(root, 'build_depend')
pkg.buildtool_depends = _get_dependencies(root, 'buildtool_depend')
if pkg.package_format == 1:
run_depends = _get_dependencies(root, 'run_depend')
for d in run_depends:
pkg.build_export_depends.append(deepcopy(d))
pkg.exec_depends.append(deepcopy(d))
if pkg.package_format == 2:
pkg.build_export_depends = _get_dependencies(root, 'build_export_depend')
pkg.buildtool_export_depends = _get_dependencies(root, 'buildtool_export_depend')
pkg.exec_depends = _get_dependencies(root, 'exec_depend')
depends = _get_dependencies(root, 'depend')
for dep in depends:
# check for collisions with specific dependencies
same_build_depends = ['build_depend' for d in pkg.build_depends if d.name == dep.name]
same_build_export_depends = ['build_export_depend' for d in pkg.build_export_depends if d.name == dep.name]
same_exec_depends = ['exec_depend' for d in pkg.exec_depends if d.name == dep.name]
if same_build_depends or same_build_export_depends or same_exec_depends:
errors.append("The generic dependency on '%s' is redundant with: %s" % (dep.name, ', '.join(same_build_depends + same_build_export_depends + same_exec_depends)))
# only append non-duplicates
if not same_build_depends:
pkg.build_depends.append(deepcopy(dep))
if not same_build_export_depends:
pkg.build_export_depends.append(deepcopy(dep))
if not same_exec_depends:
pkg.exec_depends.append(deepcopy(dep))
pkg.doc_depends = _get_dependencies(root, 'doc_depend')
pkg.test_depends = _get_dependencies(root, 'test_depend')
pkg.conflicts = _get_dependencies(root, 'conflict')
pkg.replaces = _get_dependencies(root, 'replace')
if pkg.package_format == 1:
for test_depend in pkg.test_depends:
same_build_depends = ['build_depend' for d in pkg.build_depends if d.name == test_depend.name]
same_run_depends = ['run_depend' for d in pkg.run_depends if d.name == test_depend.name]
if same_build_depends or same_run_depends:
errors.append('The test dependency on "%s" is redundant with: %s' % (test_depend.name, ', '.join(same_build_depends + same_run_depends)))
# exports
export_node = _get_optional_node(root, 'export')
if export_node is not None:
exports = []
for node in [n for n in export_node.childNodes if n.nodeType == n.ELEMENT_NODE]:
export = Export(str(node.tagName), _get_node_value(node, allow_xml=True))
for key, value in node.attributes.items():
export.attributes[str(key)] = str(value)
exports.append(export)
pkg.exports = exports
# verify that no unsupported tags and attributes are present
unknown_root_attributes = [attr for attr in root.attributes.keys() if str(attr) != 'format']
if unknown_root_attributes:
errors.append('The "package" tag must not have the following attributes: %s' % ', '.join(unknown_root_attributes))
depend_attributes = ['version_lt', 'version_lte', 'version_eq', 'version_gte', 'version_gt']
known = {
'name': [],
'version': ['abi'],
'description': [],
'maintainer': ['email'],
'license': [],
'url': ['type'],
'author': ['email'],
'build_depend': depend_attributes,
'buildtool_depend': depend_attributes,
'test_depend': depend_attributes,
'conflict': depend_attributes,
'replace': depend_attributes,
'export': [],
}
if pkg.package_format == 1:
known.update({
'run_depend': depend_attributes,
})
if pkg.package_format == 2:
known.update({
'build_export_depend': depend_attributes,
'buildtool_export_depend': depend_attributes,
'depend': depend_attributes,
'exec_depend': depend_attributes,
'doc_depend': depend_attributes,
})
nodes = [n for n in root.childNodes if n.nodeType == n.ELEMENT_NODE]
unknown_tags = set([n.tagName for n in nodes if n.tagName not in known.keys()])
if unknown_tags:
errors.append('The manifest (with format version %d) must not contain the following tags: %s' % (pkg.package_format, ', '.join(unknown_tags)))
for node in [n for n in nodes if n.tagName in known.keys()]:
unknown_attrs = [str(attr) for attr in node.attributes.keys() if str(attr) not in known[node.tagName]]
if unknown_attrs:
errors.append('The "%s" tag must not have the following attributes: %s' % (node.tagName, ', '.join(unknown_attrs)))
if node.tagName not in ['description', 'export']:
subnodes = [n for n in node.childNodes if n.nodeType == n.ELEMENT_NODE]
if subnodes:
errors.append('The "%s" tag must not contain the following children: %s' % (node.tagName, ', '.join([n.tagName for n in subnodes])))
if errors:
raise InvalidPackage('Error(s) in %s:%s' % (filename, ''.join(['\n- %s' % e for e in errors])))
pkg.validate(warnings=warnings)
return pkg
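# Illustrative sketch (editor addition): a minimal format-2 manifest that
# passes parse_package_string(); the XML content is invented for demonstration.
_EXAMPLE_MANIFEST = (
    '<package format="2">'
    '<name>demo_pkg</name>'
    '<version>0.1.0</version>'
    '<description>Demo</description>'
    '<maintainer email="dev@example.com">Dev</maintainer>'
    '<license>BSD</license>'
    '</package>')
def _example_parse_package_string():
    pkg = parse_package_string(_EXAMPLE_MANIFEST)
    assert pkg.name == 'demo_pkg' and pkg.package_format == 2
    return pkg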
def _get_nodes(parent, tagname):
return [n for n in parent.childNodes if n.nodeType == n.ELEMENT_NODE and n.tagName == tagname]
def _get_node(parent, tagname):
nodes = _get_nodes(parent, tagname)
if len(nodes) != 1:
        raise InvalidPackage('The manifest must contain exactly one "%s" tag' % tagname)
return nodes[0]
def _get_optional_node(parent, tagname):
nodes = _get_nodes(parent, tagname)
if len(nodes) > 1:
        raise InvalidPackage('The manifest must not contain more than one "%s" tag' % tagname)
return nodes[0] if nodes else None
def _get_node_value(node, allow_xml=False, apply_str=True):
if allow_xml:
value = (''.join([n.toxml() for n in node.childNodes])).strip(' \n\r\t')
else:
value = (''.join([n.data for n in node.childNodes if n.nodeType == n.TEXT_NODE])).strip(' \n\r\t')
if apply_str:
value = str(value)
return value
def _get_optional_node_value(parent, tagname, default=None):
node = _get_optional_node(parent, tagname)
if node is None:
return default
return _get_node_value(node)
def _get_node_attr(node, attr, default=False):
"""
:param default: False means value is required
"""
if node.hasAttribute(attr):
return str(node.getAttribute(attr))
if default is False:
raise InvalidPackage('The "%s" tag must have the attribute "%s"' % (node.tagName, attr))
return default
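# Illustrative sketch (editor addition): the False default above is a sentinel
# meaning "required", while any other default (including None) marks the
# attribute as optional.
def _example_get_node_attr(node):
    required = _get_node_attr(node, 'email')                # raises InvalidPackage if absent
    optional = _get_node_attr(node, 'email', default=None)  # returns None if absent
    return required, optional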
def _get_dependencies(parent, tagname):
depends = []
for node in _get_nodes(parent, tagname):
depend = Dependency(_get_node_value(node))
for attr in ['version_lt', 'version_lte', 'version_eq', 'version_gte', 'version_gt']:
setattr(depend, attr, _get_node_attr(node, attr, None))
depends.append(depend)
return depends
| bsd-3-clause |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/EGL/KHR/create_context.py | 8 | 1674 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_KHR_create_context'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_KHR_create_context',error_checker=_errors._error_checker)
EGL_CONTEXT_FLAGS_KHR=_C('EGL_CONTEXT_FLAGS_KHR',0x30FC)
EGL_CONTEXT_MAJOR_VERSION_KHR=_C('EGL_CONTEXT_MAJOR_VERSION_KHR',0x3098)
EGL_CONTEXT_MINOR_VERSION_KHR=_C('EGL_CONTEXT_MINOR_VERSION_KHR',0x30FB)
EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR=_C('EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR',0x00000002)
EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR=_C('EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR',0x00000001)
EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR=_C('EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR',0x00000001)
EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR=_C('EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR',0x00000002)
EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR=_C('EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR',0x30FD)
EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR=_C('EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR',0x31BD)
EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR=_C('EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR',0x00000004)
EGL_LOSE_CONTEXT_ON_RESET_KHR=_C('EGL_LOSE_CONTEXT_ON_RESET_KHR',0x31BF)
EGL_NO_RESET_NOTIFICATION_KHR=_C('EGL_NO_RESET_NOTIFICATION_KHR',0x31BE)
EGL_OPENGL_ES3_BIT=_C('EGL_OPENGL_ES3_BIT',0x00000040)
EGL_OPENGL_ES3_BIT_KHR=_C('EGL_OPENGL_ES3_BIT_KHR',0x00000040)
| lgpl-3.0 |
soileater/noobest | websites/views.py | 1 | 3987 | from utils.api import get_watcher
from django.shortcuts import render, HttpResponseRedirect
from utils.constants import ErrorList, division
from rank.models import Player
from rank.constants import RANK_INT
from rank.views import get_vector, get_score, get_data, get_rank
import operator
import json
def index(request):
return render(request, "index.html", locals())
def about(request):
return render(request, "about.html", locals())
def result(request, username):
username = username.replace("-", "%20")
print username
player = Player.objects.get(username=username)
print player
friends_id = json.loads(player.friends.replace("'", '"'))['ids']
players = Player.objects.filter(userid__in=friends_id).order_by('evaluation', 'total_score')
print [player.username + " rank: " + str(player.evaluation) + " total score: " + str(player.total_score) for player in players]
if len(players) > 0:
noobest = players[0].username
players_1 = players[0]
cs = players_1.get_user_vector_cs()
if len(players) > 1:
players_2 = players[1]
if len(players) > 2:
players_3 = players[2]
return render(request, "result.html", locals())
def transition(request, username):
return render(request, "transition.html", locals())
def testing(request):
return render(request, "testing.html", locals())
def summoner(request):
if request.is_ajax():
#deal with the user input here.
# rank(user)
# redirect to result display page
return render(request, "result.html", locals())
return render(request, "search.html", locals())
def search(request):
if request.is_ajax():
        if 'name' not in request.POST:
return render(request, "search.html", locals())
name = request.POST.get('name').replace(" ", "%20")
try:
me = get_watcher().get_summoner(name=name)
except:
error_code = ErrorList.USER_NOT_FOUND
return render(request, "search.html", locals())
match = get_watcher().get_match_list(me['id'],'na')
match_id_list = [i['matchId'] for i in match['matches'] if i['queue'] == 'TEAM_BUILDER_DRAFT_RANKED_5x5'][:9]
match_details = []
all_players_id = dict()
for match_id in match_id_list:
match_detail = get_watcher().get_match(match_id)
match_details.append(match_detail)
for data in match_detail['participantIdentities']:
if data['player']['summonerId'] in all_players_id:
all_players_id[data['player']['summonerId']].append([match_detail, data['participantId']])
else:
all_players_id[data['player']['summonerId']] = [[match_detail, data['participantId']]]
player, _ = Player.objects.get_or_create(username=name, userid=me['id'])
friends_id = [key for key, value in all_players_id.items() if len(value) > 1]
friends_id.remove(int(me['id']))
player.friends = json.dumps({'ids': friends_id})
player.save()
for fid in friends_id:
player_detail = get_watcher().get_league_entry(summoner_ids=[fid])
player, _ = Player.objects.get_or_create(userid=fid)
player.username = player_detail[str(fid)][0]['entries'][0]['playerOrTeamName']
player.rank = RANK_INT[player_detail[str(fid)][0]['tier']]
player.division = division[player_detail[str(fid)][0]['entries'][0]['division']]
data = get_data(fid, all_players_id)
player.vector = data
vector = get_vector(data)
index, rank = get_rank(vector)
score = get_score(vector, index)
player.evaluation = rank[0]
player.scores = score
player.total_score = score['total']
player.save()
#return render(request, "result.html", locals())
return render(request, "search.html", locals())
| mit |
srene/ns-3-mptcp | src/emu/bindings/modulegen__gcc_ILP32.py | 24 | 286058 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.emu', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection', import_from_module='ns.core')
## data-rate.h (module 'network'): ns3::DataRate [class]
module.add_class('DataRate', import_from_module='ns.network')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## emu-helper.h (module 'emu'): ns3::EmuHelper [class]
module.add_class('EmuHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## data-rate.h (module 'network'): ns3::DataRateChecker [class]
module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## data-rate.h (module 'network'): ns3::DataRateValue [class]
module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice [class]
module.add_class('EmuNetDevice', parent=root_module['ns3::NetDevice'])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EncapsulationMode [enumeration]
module.add_enum('EncapsulationMode', ['ILLEGAL', 'DIX', 'LLC'], outer_class=root_module['ns3::EmuNetDevice'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3EmuHelper_methods(root_module, root_module['ns3::EmuHelper'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3EmuNetDevice_methods(root_module, root_module['ns3::EmuNetDevice'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
cls.add_method('CreateFileStream',
'ns3::Ptr< ns3::OutputStreamWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
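## Usage sketch (illustrative only; not part of the generated bindings).
## From Python, AsciiTraceHelper is mostly used to build the stream that
## the Enable* calls below write into. Assumes the modular 'ns.network'
## bindings name produced by the ns-3 build:
def _example_ascii_trace_helper():
    import ns.network
    helper = ns.network.AsciiTraceHelper()
    # Ptr<OutputStreamWrapper> that can be shared by all traced devices
    stream = helper.CreateFileStream('example-trace.tr')
    return stream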
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('std::string', 'prefix')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
return
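## Usage sketch (illustrative only). AsciiTraceHelperForDevice is
## abstract -- EnableAsciiInternal above is pure virtual -- so the many
## EnableAscii overloads are reached through a concrete device helper.
## Hypothetical sketch assuming the point-to-point module is built:
def _example_enable_ascii():
    import ns.network
    import ns.point_to_point
    nodes = ns.network.NodeContainer()
    nodes.Create(2)
    p2p = ns.point_to_point.PointToPointHelper()
    devices = p2p.Install(nodes)
    p2p.EnableAscii('example', devices)  # one .tr file per device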
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
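## Usage sketch (illustrative only). The Buffer API above is typically
## driven as a reserve/write/read round trip through Begin():
def _example_buffer_roundtrip():
    import ns.network
    buf = ns.network.Buffer()
    buf.AddAtStart(4)                  # reserve 4 bytes at the front
    it = buf.Begin()
    it.WriteHtonU32(0xdeadbeef)        # network byte order
    it = buf.Begin()                   # fresh iterator for reading
    assert it.ReadNtohU32() == 0xdeadbeef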
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
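## Usage sketch (illustrative only). The iterator's read/write families
## differ only in byte order: ReadU*/WriteU* use host order,
## ReadNtoh*/WriteHton* big-endian (network) order, and
## ReadLsbtoh*/WriteHtolsb* little-endian order:
def _example_iterator_endianness():
    import ns.network
    buf = ns.network.Buffer(2)         # 2 zero-initialized bytes
    w = buf.Begin()
    w.WriteHtolsbU16(0x1234)           # stored little-endian: 34 12
    r = buf.Begin()
    assert r.ReadLsbtohU16() == 0x1234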
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
def register_Ns3DataRate_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('>=')
## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
cls.add_constructor([param('uint64_t', 'bps')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
cls.add_constructor([param('std::string', 'rate')])
## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
cls.add_method('CalculateTxTime',
'double',
[param('uint32_t', 'bytes')],
is_const=True)
## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
cls.add_method('GetBitRate',
'uint64_t',
[],
is_const=True)
return
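## Usage sketch (illustrative only). DataRate parses human-readable
## rate strings, and this binding's CalculateTxTime returns seconds as
## a plain double:
def _example_data_rate():
    import ns.network
    rate = ns.network.DataRate('5Mbps')
    assert rate.GetBitRate() == 5000000
    tx_seconds = rate.CalculateTxTime(1500)  # serialize a 1500-byte packet
    return tx_seconds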
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
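## Usage sketch (illustrative only). EventId is the handle returned by
## Simulator::Schedule; assumes the 'ns.core' bindings for the
## scheduling side:
def _example_event_id():
    import ns.core
    ev = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), lambda: None)
    assert ev.IsRunning() and not ev.IsExpired()
    ev.Cancel()                        # expired now; never executes
    assert ev.IsExpired()
    ns.core.Simulator.Destroy()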
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
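## Usage sketch (illustrative only). Ipv4Address combines with Ipv4Mask
## to derive network and directed-broadcast addresses:
def _example_ipv4_address():
    import ns.network
    addr = ns.network.Ipv4Address('10.1.1.7')
    mask = ns.network.Ipv4Mask('255.255.255.0')
    net = addr.CombineMask(mask)                   # 10.1.1.0
    bcast = addr.GetSubnetDirectedBroadcast(mask)  # 10.1.1.255
    assert net.IsEqual(ns.network.Ipv4Address('10.1.1.0'))
    return bcast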
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
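## Usage sketch (illustrative only). Ipv4Mask also accepts '/len'
## prefix notation, and IsMatch tests whether two addresses share the
## masked network:
def _example_ipv4_mask():
    import ns.network
    mask = ns.network.Ipv4Mask('/24')
    assert mask.GetPrefixLength() == 24
    a = ns.network.Ipv4Address('192.168.1.10')
    b = ns.network.Ipv4Address('192.168.1.200')
    assert mask.IsMatch(a, b)          # same /24 network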
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
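## Usage sketch (illustrative only). The MakeAutoconfigured* factories
## above implement EUI-64 style address derivation from a MAC address:
def _example_ipv6_autoconf():
    import ns.network
    mac = ns.network.Mac48Address('00:11:22:33:44:55')
    ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    assert ll.IsLinkLocal()            # fe80::/64 derived from the MAC
    return ll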
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
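## Usage sketch (illustrative only). Ipv6Prefix plays the same role for
## IPv6 that Ipv4Mask plays for IPv4:
def _example_ipv6_prefix():
    import ns.network
    prefix = ns.network.Ipv6Prefix(64)
    assert prefix.GetPrefixLength() == 64
    a = ns.network.Ipv6Address('2001:db8::1')
    b = ns.network.Ipv6Address('2001:db8::2')
    assert prefix.IsMatch(a, b)        # same /64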
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
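## Usage sketch (illustrative only). Allocate() hands out sequential
## MAC addresses, which is how ns-3 device helpers assign defaults:
def _example_mac48_address():
    import ns.network
    m1 = ns.network.Mac48Address.Allocate()
    m2 = ns.network.Mac48Address.Allocate()
    assert m1 != m2                    # sequential, never equal
    bcast = ns.network.Mac48Address.GetBroadcast()
    assert bcast.IsBroadcast() and bcast.IsGroup()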
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
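## Usage sketch (illustrative only). NetDeviceContainer concatenates
## cheaply; the two-container constructor above is the usual way to
## merge the results of several Install() calls:
def _example_device_container():
    import ns.network
    a = ns.network.NetDeviceContainer()
    b = ns.network.NetDeviceContainer()
    both = ns.network.NetDeviceContainer(a, b)     # a's devices, then b's
    assert both.GetN() == a.GetN() + b.GetN()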
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
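## Usage sketch (illustrative only). NodeContainer is the entry point
## of most scripts: Create() instantiates Nodes, Get() returns a
## Ptr<Node> for wiring into helpers:
def _example_node_container():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)
    assert nodes.GetN() == 3
    first = nodes.Get(0)               # Ptr<Node>
    return first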
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
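# Hedged sketch of the ObjectBase attribute API bound above. `obj` is any bound
# ns3::ObjectBase instance; `ns.core.StringValue` and the "Name" attribute are
# assumptions used only for illustration (concrete AttributeValue holders are
# registered elsewhere in these bindings).
def _example_object_base_attributes(ns, obj):
    value = ns.core.StringValue()                # holder filled in place below
    obj.GetAttribute("Name", value)              # "Name" is a hypothetical attribute
    if obj.SetAttributeFailSafe("Name", ns.core.StringValue("node-0")):
        pass                                     # attribute existed and was set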
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
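# Minimal sketch of the ObjectFactory flow registered above; the TypeId name is
# only an example, not something this file guarantees exists.
def _example_object_factory(ns):
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::DropTailQueue")   # string overload of SetTypeId
    obj = factory.Create()                    # -> ns3::Ptr<ns3::Object>
    return factory.GetTypeId(), obj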
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
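# Minimal sketch: metadata tracking is off by default, so the static switches
# registered above should be flipped before packets are created if BeginItem()
# is expected to yield a complete header/trailer history.
def _example_enable_packet_metadata(ns):
    ns.network.PacketMetadata.Enable()          # record header/trailer history
    ns.network.PacketMetadata.EnableChecking()  # additionally verify operations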
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
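# Hedged sketch of walking packet metadata with the iterator bound above;
# `metadata` (ns3::PacketMetadata) and `buffer` (ns3::Buffer) are assumed to
# come from an existing packet.
def _example_iterate_metadata(metadata, buffer):
    it = metadata.BeginItem(buffer)
    while it.HasNext():
        item = it.Next()                 # -> PacketMetadata::Item
        print(item.tid.GetName(), item.currentSize, item.isFragment)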
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
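# Hedged sketch of consuming the tag iterator bound above. The accessor
# `GetPacketTagIterator()` on ns3::Packet is an assumption (it is registered
# elsewhere in the module, not in this block).
def _example_iterate_packet_tags(packet):
    it = packet.GetPacketTagIterator()
    while it.HasNext():
        item = it.Next()
        print(item.GetTypeId().GetName())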
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PcapFile_methods(root_module, cls):
## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
cls.add_constructor([])
## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
cls.add_method('Diff',
'bool',
[param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
is_static=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
cls.add_method('GetSwapMode',
'bool',
[])
## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
cls.add_method('Read',
'void',
[param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
return
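# Hedged sketch of the low-level PcapFile API above. `openmode` must be a
# bound std::_Ios_Openmode value and `pkt` an ns3::Ptr<const ns3::Packet>;
# both are assumed to come from elsewhere in the bindings.
def _example_write_pcap(ns, openmode, pkt):
    f = ns.network.PcapFile()
    f.Open("trace.pcap", openmode)
    f.Init(1)                        # DLT_EN10MB; snapLen/zone keep defaults
    f.Write(0, 0, pkt)               # timestamp 0s/0us, then the packet
    f.Close()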
def register_Ns3PcapHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function]
cls.add_method('CreateFile',
'ns3::Ptr< ns3::PcapFileWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
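# Minimal sketch of the PcapHelper methods above; `device` is an assumed
# ns3::Ptr<ns3::NetDevice> and the resulting filename follows the helper's
# prefix-node-device naming scheme (e.g. "run1-0-0.pcap").
def _example_pcap_helper(ns, openmode, device):
    helper = ns.network.PcapHelper()
    name = helper.GetFilenameFromDevice("run1", device)
    return helper.CreateFile(name, openmode, 1)   # -> Ptr<PcapFileWrapper>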
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
cls.add_method('EnablePcapAll',
'void',
[param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
return
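# Hedged sketch of the EnablePcap overloads above, as a device-helper subclass
# (e.g. EmuHelper below) would expose them; `helper`, `nodes`, and `device`
# are assumed inputs.
def _example_enable_pcap(helper, nodes, device):
    helper.EnablePcapAll("sim")                # every device on every node
    helper.EnablePcap("sim", device, True)     # one device, promiscuous mode
    helper.EnablePcap("sim", nodes, False)     # all devices in a NodeContainer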
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
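# Minimal lifecycle sketch for the static Simulator methods above; `stop_time`
# is an assumed ns3::Time value, and Simulator.Run() (which drives the event
# loop) is registered elsewhere in the generated module.
def _example_simulator_lifecycle(ns, stop_time):
    ns.core.Simulator.Stop(stop_time)   # schedule the end of the run
    # ... Simulator.Run() executes events until Stop() fires ...
    print(ns.core.Simulator.Now())      # simulation clock after the run
    ns.core.Simulator.Destroy()         # release simulator state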
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
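# Minimal sketch of the SystemMutex API above, with the usual try/finally
# pairing so the lock is released even if the critical section raises.
def _example_system_mutex(ns):
    m = ns.core.SystemMutex()
    m.Lock()
    try:
        pass        # critical section
    finally:
        m.Unlock()  # always release, even on error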
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
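# Hedged sketch only: the pure-virtual methods above suggest a Python-defined
# tag, assuming the generated helper class supports virtual dispatch from
# Python subclasses. FlagTag and its 4-byte payload are hypothetical.
def _make_example_tag(ns):
    class FlagTag(ns.network.Tag):
        def GetSerializedSize(self):
            return 4                        # one uint32 payload
        def Serialize(self, buf):           # buf: ns3::TagBuffer
            buf.WriteU32(42)
        def Deserialize(self, buf):
            self.value = buf.ReadU32()
        def Print(self, os):
            pass
    return FlagTag()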
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
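# Minimal introspection sketch using the TypeId methods above; the looked-up
# type name is only an example.
def _example_type_id_introspection(ns):
    tid = ns.core.TypeId.LookupByName("ns3::Node")
    print(tid.GetName(), tid.GetUid())
    for i in range(tid.GetAttributeN()):             # walk registered attributes
        info = tid.GetAttribute(i)                   # -> AttributeInformation
        print(tid.GetAttributeFullName(i), info.help)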
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
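# Minimal arithmetic sketch for the int64x64_t operators above. Note the
# headers cited in this block are int64x64-double.h, i.e. this build backs the
# 64.64 fixed-point type with a double.
def _example_int64x64(ns):
    a = ns.core.int64x64_t(1.5)          # double constructor
    b = ns.core.int64x64_t(2)            # int constructor
    product = a * b
    assert product.GetDouble() == 3.0
    return product.GetHigh(), product.GetLow()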
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3EmuHelper_methods(root_module, cls):
## emu-helper.h (module 'emu'): ns3::EmuHelper::EmuHelper(ns3::EmuHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmuHelper const &', 'arg0')])
## emu-helper.h (module 'emu'): ns3::EmuHelper::EmuHelper() [constructor]
cls.add_constructor([])
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True)
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(std::string nodeName) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName')],
is_const=True)
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(ns3::NodeContainer const & c) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer const &', 'c')],
is_const=True)
## emu-helper.h (module 'emu'): void ns3::EmuHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
## emu-helper.h (module 'emu'): void ns3::EmuHelper::SetQueue(std::string type, std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue()) [member function]
cls.add_method('SetQueue',
'void',
[param('std::string', 'type'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()')])
## emu-helper.h (module 'emu'): void ns3::EmuHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
## emu-helper.h (module 'emu'): void ns3::EmuHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
return
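# Hedged sketch of the EmuHelper workflow above; the `ns.emu` import name, the
# "DeviceName" attribute, and `ns.core.StringValue` are assumptions about
# other parts of the bindings.
def _example_emu_helper(ns, nodes):
    emu = ns.emu.EmuHelper()
    emu.SetAttribute("DeviceName", ns.core.StringValue("eth0"))
    return emu.Install(nodes)            # nodes: NodeContainer -> NetDeviceContainer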
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
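# Aside: the calls above follow pybindgen's standard registration API. A
# minimal hand-written equivalent is sketched below ("Counter" and
# "example.h" are hypothetical). The is_pure_virtual/is_virtual flags mark
# abstract methods such as Header::Serialize, for which pybindgen emits a
# proxy class so Python subclasses can override them.
def _sketch_pybindgen_module():
    import sys
    from pybindgen import Module, FileCodeSink, param
    mod = Module('example')
    mod.add_include('"example.h"')          # header declaring the class
    cls = mod.add_class('Counter')          # hypothetical C++ class
    cls.add_constructor([])
    cls.add_method('Increment', 'void', [param('int', 'step')])
    cls.add_method('Get', 'int', [], is_const=True)
    mod.generate(FileCodeSink(sys.stdout))  # emit the C extension source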
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
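# Illustrative sketch only: walking the objects aggregated to an
# ns3::Object through the AggregateIterator methods registered above
# (GetAggregateIterator/HasNext/Next). The caller supplies the object.
def _sketch_aggregate_iteration(obj):
    it = obj.GetAggregateIterator()
    aggregated = []
    while it.HasNext():
        aggregated.append(it.Next())  # Ptr<const Object>
    return aggregated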
def register_Ns3PcapFileWrapper_methods(root_module, cls):
## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
cls.add_constructor([])
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
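# Aside: pybindgen treats every template instantiation as a distinct class,
# which is why the SimpleRefCount<...> registrations above repeat the same
# three calls per type. A hand-maintained generator could factor the pattern
# roughly as below (class objects assumed already created; the templated
# copy constructors are omitted from this sketch for brevity):
def _sketch_register_simple_ref_counts(classes):
    for cls in classes:
        cls.add_constructor([])
        cls.add_method('Cleanup', 'void', [], is_static=True)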
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'pthread_t',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::FreezeResolution() [member function]
cls.add_method('FreezeResolution',
'void',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
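# Illustrative sketch only: the operators and converters registered above
# let ns3::Time behave naturally in Python (assumes the built ns.core
# module is importable; the time strings are examples).
def _sketch_time_arithmetic():
    import ns.core
    t1 = ns.core.Time("1ms")        # std::string constructor, see above
    t2 = ns.core.Time("250us")
    total = t1 + t2                 # add_binary_numeric_operator('+')
    assert t2 < t1                  # add_binary_comparison_operator('<')
    return total.GetMicroSeconds()  # -> 1250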
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3DataRateChecker_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
return
def register_Ns3DataRateValue_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
cls.add_constructor([param('ns3::DataRate const &', 'value')])
## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
cls.add_method('Get',
'ns3::DataRate',
[],
is_const=True)
## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::DataRate const &', 'value')])
return
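# Illustrative sketch only: the Value/Checker pairs registered in this file
# plug into ns-3's attribute system. Setting and reading back an attribute
# on a concrete object looks roughly like this; the "DataRate" attribute
# name and the device argument are assumptions.
def _sketch_data_rate_attribute(device):
    import ns.network
    device.SetAttribute("DataRate",
                        ns.network.DataRateValue(ns.network.DataRate("5Mbps")))
    value = ns.network.DataRateValue()
    device.GetAttribute("DataRate", value)
    return value.Get()  # ns3::DataRate, via the Get() binding above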
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3EmuNetDevice_methods(root_module, cls):
## emu-net-device.h (module 'emu'): static ns3::TypeId ns3::EmuNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EmuNetDevice() [constructor]
cls.add_constructor([])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetDataRate(ns3::DataRate bps) [member function]
cls.add_method('SetDataRate',
'void',
[param('ns3::DataRate', 'bps')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::Start(ns3::Time tStart) [member function]
cls.add_method('Start',
'void',
[param('ns3::Time', 'tStart')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::Stop(ns3::Time tStop) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time', 'tStop')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetQueue(ns3::Ptr<ns3::Queue> queue) [member function]
cls.add_method('SetQueue',
'void',
[param('ns3::Ptr< ns3::Queue >', 'queue')])
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Queue> ns3::EmuNetDevice::GetQueue() const [member function]
cls.add_method('GetQueue',
'ns3::Ptr< ns3::Queue >',
[],
is_const=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## emu-net-device.h (module 'emu'): uint32_t ns3::EmuNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Channel> ns3::EmuNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## emu-net-device.h (module 'emu'): uint16_t ns3::EmuNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Node> ns3::EmuNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetEncapsulationMode(ns3::EmuNetDevice::EncapsulationMode mode) [member function]
cls.add_method('SetEncapsulationMode',
'void',
[param('ns3::EmuNetDevice::EncapsulationMode', 'mode')])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EncapsulationMode ns3::EmuNetDevice::GetEncapsulationMode() const [member function]
cls.add_method('GetEncapsulationMode',
'ns3::EmuNetDevice::EncapsulationMode',
[],
is_const=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
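# --- Editorial note (illustrative; not part of the generated bindings) ---
# A minimal sketch of how a pybindgen script like this one is usually driven;
# the output file name below is an assumption:
#
#     python <this_script>.py > ns3module_emu.cc
#
# main() wraps sys.stdout in FileCodeSink, so root_module.generate(out)
# writes the generated C++ binding source to standard output, ready to be
# compiled into the ns-3 Python extension module.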
| gpl-2.0 |
AmperificSuperKANG/lge_kernel_loki | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
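# --- Editorial note (illustrative usage; exact flags may vary by perf build) ---
# A typical workflow, assuming perf was built with Python scripting support:
#
#     perf record -e raw_syscalls:sys_exit -a -- sleep 10
#     perf script -s failed-syscalls-by-pid.py
#
# Passing a command name or pid as the single argument restricts the report,
# e.g. `perf script -s failed-syscalls-by-pid.py firefox`.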
| gpl-2.0 |
SNAPPETITE/backend | flask/lib/python2.7/site-packages/flask_whooshalchemy.py | 59 | 9076 | '''
whooshalchemy flask extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds whoosh indexing capabilities to SQLAlchemy models for Flask
applications.
:copyright: (c) 2012 by Karl Gyllstrom
:license: BSD (see LICENSE.txt)
'''
from __future__ import with_statement
from __future__ import absolute_import
import flask.ext.sqlalchemy as flask_sqlalchemy
import sqlalchemy
from whoosh.qparser import OrGroup
from whoosh.qparser import AndGroup
from whoosh.qparser import MultifieldParser
from whoosh.analysis import StemmingAnalyzer
import whoosh.index
from whoosh.fields import Schema
#from whoosh.fields import ID, TEXT, KEYWORD, STORED
import heapq
import os
__searchable__ = '__searchable__'
DEFAULT_WHOOSH_INDEX_NAME = 'whoosh_index'
class _QueryProxy(flask_sqlalchemy.BaseQuery):
# We're replacing the model's ``query`` field with this proxy. The main
# thing this proxy does is override the __iter__ method so that results are
# returned in the order of the whoosh score to reflect text-based ranking.
def __init__(self, entities, session=None):
super(_QueryProxy, self).__init__(entities, session)
self._modelclass = self._mapper_zero().class_
self._primary_key_name = self._modelclass.whoosh_primary_key
self._whoosh_searcher = self._modelclass.pure_whoosh
# Stores whoosh results from query. If ``None``, indicates that no
# whoosh query was performed.
self._whoosh_rank = None
def __iter__(self):
''' Reorder ORM-db results according to Whoosh relevance score. '''
super_iter = super(_QueryProxy, self).__iter__()
if self._whoosh_rank is None:
# Whoosh search hasn't been run so behave as normal.
return super_iter
# Iterate through the values and re-order by whoosh relevance.
ordered_by_whoosh_rank = []
for row in super_iter:
# Push items onto heap, where sort value is the rank provided by
# Whoosh
heapq.heappush(ordered_by_whoosh_rank,
(self._whoosh_rank[unicode(getattr(row,
self._primary_key_name))], row))
def _inner():
while ordered_by_whoosh_rank:
yield heapq.heappop(ordered_by_whoosh_rank)[1]
return _inner()
def whoosh_search(self, query, limit=None, fields=None, or_=False):
'''
Execute text query on database. Results have a text-based
match to the query, ranked by the scores from the underlying Whoosh
index.
By default, the search is executed on all of the indexed fields as an
OR conjunction. For example, if a model has 'title' and 'content'
indicated as ``__searchable__``, a query will be checked against both
fields, returning any instance whose title or content matches the
query. To specify the particular fields to be checked,
populate the ``fields`` parameter with the desired fields.
By default, results will only be returned if they contain all of the
query terms (AND). To switch to an OR grouping, set the ``or_``
parameter to ``True``.
'''
if not isinstance(query, unicode):
query = unicode(query)
results = self._whoosh_searcher(query, limit, fields, or_)
if not results:
# We don't want to proceed with empty results because we get a
# stderr warning from sqlalchemy when executing 'in_' on empty set.
# However we cannot just return an empty list because it will not
# be a query.
# XXX is this efficient?
return self.filter('null')
result_set = set()
result_ranks = {}
for rank, result in enumerate(results):
pk = result[self._primary_key_name]
result_set.add(pk)
result_ranks[pk] = rank
f = self.filter(getattr(self._modelclass,
self._primary_key_name).in_(result_set))
f._whoosh_rank = result_ranks
return f
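# --- Editorial note: illustrative usage sketch (model and field names are
# assumptions, not part of this module) ---
#
#     class BlogPost(db.Model):
#         __searchable__ = ['title', 'content']   # fields indexed by whoosh
#         id = db.Column(db.Integer, primary_key=True)
#         title = db.Column(db.Unicode)
#         content = db.Column(db.Text)
#
#     # AND semantics by default; or_=True switches to an OR grouping:
#     results = BlogPost.query.whoosh_search(u'search terms', limit=10, or_=True)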
class _Searcher(object):
''' Assigned to a Model class as ``pure_whoosh``, which enables
text querying against the whoosh hit list. Also used by ``query.whoosh_search``. '''
def __init__(self, primary, indx):
self.primary_key_name = primary
self._index = indx
self.searcher = indx.searcher()
self._all_fields = list(set(indx.schema._fields.keys()) -
set([self.primary_key_name]))
def __call__(self, query, limit=None, fields=None, or_=False):
if fields is None:
fields = self._all_fields
group = OrGroup if or_ else AndGroup
parser = MultifieldParser(fields, self._index.schema, group=group)
return self._index.searcher().search(parser.parse(query),
limit=limit)
def whoosh_index(app, model):
''' Create whoosh index for ``model``, if one does not exist. If
the index exists it is opened and cached. '''
# gets the whoosh index for this model, creating one if it does not exist.
# A dict of model -> whoosh index is added to the ``app`` variable.
if not hasattr(app, 'whoosh_indexes'):
app.whoosh_indexes = {}
return app.whoosh_indexes.get(model.__name__,
_create_index(app, model))
def _create_index(app, model):
# a schema is created based on the fields of the model. Currently we only
# support primary key -> whoosh.ID, and sqlalchemy.(String, Unicode, Text)
# -> whoosh.TEXT.
if not app.config.get('WHOOSH_BASE'):
# XXX todo: is there a better approach to handle the absence of a
# config value for whoosh base? Should we throw an exception? If
# so, this exception will be thrown in the after_commit function,
# which is probably not ideal.
app.config['WHOOSH_BASE'] = DEFAULT_WHOOSH_INDEX_NAME
# we index per model.
wi = os.path.join(app.config.get('WHOOSH_BASE'),
model.__name__)
schema, primary_key = _get_whoosh_schema_and_primary_key(model)
if whoosh.index.exists_in(wi):
indx = whoosh.index.open_dir(wi)
else:
if not os.path.exists(wi):
os.makedirs(wi)
indx = whoosh.index.create_in(wi, schema)
app.whoosh_indexes[model.__name__] = indx
model.pure_whoosh = _Searcher(primary_key, indx)
model.whoosh_primary_key = primary_key
# change the query class of this model to our own
model.query_class = _QueryProxy
return indx
def _get_whoosh_schema_and_primary_key(model):
schema = {}
primary = None
searchable = set(model.__searchable__)
for field in model.__table__.columns:
if field.primary_key:
schema[field.name] = whoosh.fields.ID(stored=True, unique=True)
primary = field.name
if field.name in searchable and isinstance(field.type,
(sqlalchemy.types.Text, sqlalchemy.types.String,
sqlalchemy.types.Unicode)):
schema[field.name] = whoosh.fields.TEXT(
analyzer=StemmingAnalyzer())
return Schema(**schema), primary
def _after_flush(app, changes):
# Any db updates go through here. We check if any of these models have
# ``__searchable__`` fields, indicating they need to be indexed. With these
# we update the whoosh index for the model. If no index exists, it will be
# created here; this could impose a penalty on the initial commit of a
# model.
bytype = {} # sort changes by type so we can use per-model writer
for change in changes:
update = change[1] in ('update', 'insert')
if hasattr(change[0].__class__, __searchable__):
bytype.setdefault(change[0].__class__.__name__, []).append((update,
change[0]))
for model, values in bytype.iteritems():
index = whoosh_index(app, values[0][1].__class__)
with index.writer() as writer:
primary_field = values[0][1].pure_whoosh.primary_key_name
searchable = values[0][1].__searchable__
for update, v in values:
if update:
attrs = {}
for key in searchable:
try:
attrs[key] = unicode(getattr(v, key))
except AttributeError:
raise AttributeError('{0} does not have {1} field {2}'
.format(model, __searchable__, key))
attrs[primary_field] = unicode(getattr(v, primary_field))
writer.update_document(**attrs)
else:
writer.delete_by_term(primary_field, unicode(getattr(v,
primary_field)))
flask_sqlalchemy.models_committed.connect(_after_flush)
# def init_app(db):
# app = db.get_app()
# # for table in db.get_tables_for_bind():
# for item in globals():
#
# #_create_index(app, table)
| mit |
mistercrunch/airflow | airflow/models/sensorinstance.py | 10 | 6417 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from sqlalchemy import BigInteger, Column, Index, Integer, String, Text
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
class SensorInstance(Base):
"""
SensorInstance supports the smart sensor service. It stores the sensor task states
and the context required for poking, including the poke context and the execution
context. In the sensor_instance table we also save the sensor operator classpath so
that inside the smart sensor there is no need to import the dagbag and create a task
object for each sensor task.
SensorInstance includes another set of columns to support sharding the smart sensor
over a large number of sensor instances. The key idea is to generate a hash code from
the poke context and map it to a shorter shard code that can be used as an index.
Every smart sensor process takes care of the tasks whose `shardcode` falls in a
certain range.
"""
__tablename__ = "sensor_instance"
id = Column(Integer, primary_key=True)
task_id = Column(String(ID_LEN), nullable=False)
dag_id = Column(String(ID_LEN), nullable=False)
execution_date = Column(UtcDateTime, nullable=False)
state = Column(String(20))
_try_number = Column('try_number', Integer, default=0)
start_date = Column(UtcDateTime)
operator = Column(String(1000), nullable=False)
op_classpath = Column(String(1000), nullable=False)
hashcode = Column(BigInteger, nullable=False)
shardcode = Column(Integer, nullable=False)
poke_context = Column(Text, nullable=False)
execution_context = Column(Text)
created_at = Column(UtcDateTime, default=timezone.utcnow(), nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow(), onupdate=timezone.utcnow(), nullable=False)
__table_args__ = (
Index('ti_primary_key', dag_id, task_id, execution_date, unique=True),
Index('si_hashcode', hashcode),
Index('si_shardcode', shardcode),
Index('si_state_shard', state, shardcode),
Index('si_updated_at', updated_at),
)
def __init__(self, ti):
self.dag_id = ti.dag_id
self.task_id = ti.task_id
self.execution_date = ti.execution_date
@staticmethod
def get_classpath(obj):
"""
Get the object dotted class path. Used for getting operator classpath.
:param obj: The object whose dotted class path is wanted.
:type obj: object
:return: The class path of input object
:rtype: str
"""
module_name, class_name = obj.__module__, obj.__class__.__name__
return module_name + "." + class_name
@classmethod
@provide_session
def register(cls, ti, poke_context, execution_context, session=None):
"""
Register task instance ti for a sensor in the sensor_instance table. Persist the
context used for a sensor and set the sensor_instance table state to sensing.
:param ti: The task instance for the sensor to be registered.
:type ti: airflow.models.TaskInstance
:param poke_context: Context used for sensor poke function.
:type poke_context: dict
:param execution_context: Context used for executing the sensor, such as the
timeout setting and email configuration.
:type execution_context: dict
:param session: SQLAlchemy ORM Session
:type session: Session
:return: True if the ti was registered successfully.
:rtype: Boolean
"""
if poke_context is None:
raise AirflowException('poke_context should not be None')
encoded_poke = json.dumps(poke_context)
encoded_execution_context = json.dumps(execution_context)
sensor = (
session.query(SensorInstance)
.filter(
SensorInstance.dag_id == ti.dag_id,
SensorInstance.task_id == ti.task_id,
SensorInstance.execution_date == ti.execution_date,
)
.with_for_update()
.first()
)
if sensor is None:
sensor = SensorInstance(ti=ti)
sensor.operator = ti.operator
sensor.op_classpath = SensorInstance.get_classpath(ti.task)
sensor.poke_context = encoded_poke
sensor.execution_context = encoded_execution_context
sensor.hashcode = hash(encoded_poke)
sensor.shardcode = sensor.hashcode % conf.getint('smart_sensor', 'shard_code_upper_limit')
sensor.try_number = ti.try_number
sensor.state = State.SENSING
sensor.start_date = timezone.utcnow()
session.add(sensor)
session.commit()
return True
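# Editorial sketch (hypothetical poke/execution contexts): a sensor task
# hands itself off to the smart sensor service with something like
#
#     SensorInstance.register(
#         ti,
#         poke_context={'filepath': '/tmp/ready', 'fs_conn_id': 'fs_default'},
#         execution_context={'timeout': 3600, 'email': None},
#     )
#
# after which the row is in State.SENSING and the smart sensor shard whose
# range covers this row's shardcode will poke on its behalf.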
@property
def try_number(self):
"""
Return the try number that this task number will be when it is actually
run.
If the TI is currently running, this will match the column in the
database, in all other cases this will be incremented.
"""
# This is designed so that task logs end up in the right file.
if self.state in State.running:
return self._try_number
return self._try_number + 1
@try_number.setter
def try_number(self, value):
self._try_number = value
def __repr__(self):
return (
"<{self.__class__.__name__}: id: {self.id} poke_context: {self.poke_context} "
"execution_context: {self.execution_context} state: {self.state}>".format(self=self)
)
| apache-2.0 |
Anonymous-X6/django | django/db/backends/mysql/base.py | 323 | 15548 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value, conv):
# Remove this function and rely on the default adapter in Django 2.0.
if settings.USE_TZ and timezone.is_aware(value):
warnings.warn(
"The MySQL database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
# This doesn't account for the database connection's timezone,
# which isn't known. (That's why this adapter is deprecated.)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
datetime.datetime: adapt_datetime_warn_on_aware_datetime,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
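# Editorial example: server_version_re.match('5.5.40-0ubuntu0.14.04.1') yields
# groups ('5', '5', '40'), which DatabaseWrapper.mysql_version below turns
# into the tuple (5, 5, 40); vendor suffixes like '-0ubuntu...' are ignored.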
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module; this is set up when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior.
self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
_data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
@cached_property
def data_types(self):
if self.features.supports_microsecond_precision:
return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
else:
return self._data_types
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
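# Editorial expansion (illustrative; the column name is hypothetical): with a
# right-hand side expression such as `nickname`, pattern_esc produces
#
#     REPLACE(REPLACE(REPLACE(`nickname`, '\\', '\\\\'), '%', '\%'), '_', '\_')
#
# (the doubled %% in the template collapses to % at execution time), and the
# result is then substituted into a pattern_ops template such as
# "LIKE CONCAT('%', ..., '%')" for an icontains lookup.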
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
if six.PY2:
kwargs['use_unicode'] = True
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
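# Editorial sketch (setting values are assumptions): a DATABASES entry such as
#
#     DATABASES = {'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': 'mydb', 'USER': 'web', 'PASSWORD': 'secret',
#         'HOST': '127.0.0.1', 'PORT': '3306',
#         'OPTIONS': {'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"},
#     }}
#
# maps to MySQLdb.connect(user='web', db='mydb', passwd='secret',
# host='127.0.0.1', port=3306, ...); OPTIONS is merged last, and
# client_flag=CLIENT.FOUND_ROWS is always set so UPDATE reports matched rows.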
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
with self.cursor() as cursor:
# SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on a
# recently-inserted row is returned when the column is tested for
# NULL. Disabling this brings this aspect of MySQL in line with
# SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
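# Usage sketch pairing the three methods above (model/table names are
# hypothetical; this mirrors the pattern the docstring describes):
#
#     connection.disable_constraint_checking()
#     try:
#         Child.objects.create(parent_id=9999)   # forward reference
#         Parent.objects.create(id=9999)
#     finally:
#         connection.enable_constraint_checking()
#     connection.check_constraints(table_names=['app_child'])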
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection():
server_info = self.connection.get_server_info()
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
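# Expected parse, assuming server_version_re (defined elsewhere in this
# module) captures the leading dotted version triple:
#     get_server_info() -> "5.7.23-log"  =>  mysql_version == (5, 7, 23)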
| bsd-3-clause |
q40223241/2015cdb_g3_40223241 | cadb_g3_0420-master/static/Brython3.1.1-20150328-091302/Lib/site-packages/spur.py | 184 | 4974 | #coding: utf-8
import math
# after importing the math module, pi is available as math.pi
# deg is the degrees-to-radians conversion factor
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
#
# below: spur-gear drawing, plus drawing on the main tkinter canvas
#
# define a function that draws a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch-circle radius, n is the number of teeth
# pa is the pressure angle (deg)
# rot is the rotation angle (deg)
# note: drawing fails when n is 52 because the base-circle and root-circle
# sizes are never compared; this still needs to be fixed
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
# the gear involute is drawn as 15 line segments
imax = 15
# on the given canvas, draw a line from the center to the top of the pitch circle on the y axis
self.create_line(midx, midy, midx, midy-rp)
# draw the rp circle (circle-drawing helper not yet defined)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric measure of tooth size): pitch diameter divided by tooth count
# the module also equals the addendum
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outer (addendum-circle) radius of the gear
ra=rp+a
# draw the ra circle (circle-drawing helper not yet defined)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base-circle radius
# the base circle is the reference circle from which the involute is generated
rb=rp*math.cos(pa*deg)
# draw the rb (base) circle (circle-drawing helper not yet defined)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root-circle (dedendum-circle) radius
rd=rp-d
# when rd is greater than rb
# draw the rd (root) circle (circle-drawing helper not yet defined)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radial increment per segment from the base circle to the addendum circle, split into imax steps
# the arc is divided into imax segments to draw the involute
dr=(ra-rb)/imax
# tan(pa*deg)-pa*deg is the involute function
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
# when i==0, start the segment from a point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# start from the left side of the root circle; except for the first point (xd,yd) on the root circle, the (xpt,ypt) points are the segment points along the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
# the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# line from the previous dedendum end point to the current
# dedendum end point
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# the following straight line approximates the root-circle arc
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
# when i==0, start the segment from a point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# start from the right side of the root circle; except for the first point (xd,yd) on the root circle, the (xpt,ypt) points are the segment points along the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
# the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the addendum circle, lfy the y coordinate
# the following straight line approximates the addendum-circle arc
self.create_line(lfx,lfy,rfx,rfy,fill=color)
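# Usage sketch (browser-side Brython; the canvas element id is hypothetical):
#
#     from browser import document
#     ctx = document["gear_canvas"].getContext("2d")
#     spur = Spur(ctx)
#     spur.Gear(300, 300, 100, n=20, pa=20, color="blue")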
| gpl-3.0 |
chiotlune/ext | gnuradio-3.7.0.1/gr-digital/examples/narrowband/benchmark_rx.py | 4 | 4751 | #!/usr/bin/env python
#
# Copyright 2010,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
# From gr-digital
from gnuradio import digital
# from current dir
from receive_path import receive_path
from uhd_interface import uhd_receiver
import struct
import sys
#import os
#print os.getpid()
#raw_input('Attach and press enter: ')
class my_top_block(gr.top_block):
def __init__(self, demodulator, rx_callback, options):
gr.top_block.__init__(self)
if(options.rx_freq is not None):
# Work-around to get the modulation's bits_per_symbol
args = demodulator.extract_kwargs_from_options(options)
symbol_rate = options.bitrate / demodulator(**args).bits_per_symbol()
self.source = uhd_receiver(options.args, symbol_rate,
options.samples_per_symbol,
options.rx_freq, options.rx_gain,
options.spec, options.antenna,
options.verbose)
options.samples_per_symbol = self.source._sps
elif(options.from_file is not None):
sys.stderr.write(("Reading samples from '%s'.\n\n" % (options.from_file)))
self.source = blocks.file_source(gr.sizeof_gr_complex, options.from_file)
else:
sys.stderr.write("No source defined, pulling samples from null source.\n\n")
self.source = blocks.null_source(gr.sizeof_gr_complex)
# Set up receive path
# do this after for any adjustments to the options that may
# occur in the sinks (specifically the UHD sink)
self.rxpath = receive_path(demodulator, rx_callback, options)
self.connect(self.source, self.rxpath)
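# Illustrative numbers only: with --bitrate 100k and a 1-bit/symbol
# demodulator (e.g. DBPSK), the symbol_rate computed above works out
# to 100 ksym/s.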
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
global n_rcvd, n_right
def main():
global n_rcvd, n_right
n_rcvd = 0
n_right = 0
def rx_callback(ok, payload):
global n_rcvd, n_right
(pktno,) = struct.unpack('!H', payload[0:2])
n_rcvd += 1
if ok:
n_right += 1
print "ok = %5s pktno = %4d n_rcvd = %4d n_right = %4d" % (
ok, pktno, n_rcvd, n_right)
demods = digital.modulation_utils.type_1_demods()
# Create Options Parser:
parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("-m", "--modulation", type="choice", choices=demods.keys(),
default='psk',
help="Select modulation from: %s [default=%%default]"
% (', '.join(demods.keys()),))
parser.add_option("","--from-file", default=None,
help="input file of samples to demod")
receive_path.add_options(parser, expert_grp)
uhd_receiver.add_options(parser)
for mod in demods.values():
mod.add_options(expert_grp)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help(sys.stderr)
sys.exit(1)
if options.from_file is None:
if options.rx_freq is None:
sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
parser.print_help(sys.stderr)
sys.exit(1)
# build the graph
tb = my_top_block(demods[options.modulation], rx_callback, options)
r = gr.enable_realtime_scheduling()
if r != gr.RT_OK:
print "Warning: Failed to enable realtime scheduling."
tb.start() # start flow graph
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| gpl-2.0 |
kennedyshead/home-assistant | tests/components/uk_transport/test_sensor.py | 5 | 3165 | """The tests for the uk_transport platform."""
import re
from unittest.mock import patch
import requests_mock
from homeassistant.components.uk_transport.sensor import (
ATTR_ATCOCODE,
ATTR_CALLING_AT,
ATTR_LOCALITY,
ATTR_NEXT_BUSES,
ATTR_NEXT_TRAINS,
ATTR_STATION_CODE,
ATTR_STOP_NAME,
CONF_API_APP_ID,
CONF_API_APP_KEY,
UkTransportSensor,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import now
from tests.common import load_fixture
BUS_ATCOCODE = "340000368SHE"
BUS_DIRECTION = "Wantage"
TRAIN_STATION_CODE = "WIM"
TRAIN_DESTINATION_NAME = "WAT"
VALID_CONFIG = {
"sensor": {
"platform": "uk_transport",
CONF_API_APP_ID: "foo",
CONF_API_APP_KEY: "ebcd1234",
"queries": [
{"mode": "bus", "origin": BUS_ATCOCODE, "destination": BUS_DIRECTION},
{
"mode": "train",
"origin": TRAIN_STATION_CODE,
"destination": TRAIN_DESTINATION_NAME,
},
],
}
}
async def test_bus(hass):
"""Test for operational uk_transport sensor with proper attributes."""
with requests_mock.Mocker() as mock_req:
uri = re.compile(UkTransportSensor.TRANSPORT_API_URL_BASE + "*")
mock_req.get(uri, text=load_fixture("uk_transport_bus.json"))
assert await async_setup_component(hass, "sensor", VALID_CONFIG)
await hass.async_block_till_done()
bus_state = hass.states.get("sensor.next_bus_to_wantage")
assert None is not bus_state
assert bus_state.name == f"Next bus to {BUS_DIRECTION}"
assert bus_state.attributes[ATTR_ATCOCODE] == BUS_ATCOCODE
assert bus_state.attributes[ATTR_LOCALITY] == "Harwell Campus"
assert bus_state.attributes[ATTR_STOP_NAME] == "Bus Station"
assert len(bus_state.attributes.get(ATTR_NEXT_BUSES)) == 2
direction_re = re.compile(BUS_DIRECTION)
for bus in bus_state.attributes.get(ATTR_NEXT_BUSES):
assert None is not bus
assert None is not direction_re.search(bus["direction"])
async def test_train(hass):
"""Test for operational uk_transport sensor with proper attributes."""
with requests_mock.Mocker() as mock_req, patch(
"homeassistant.util.dt.now", return_value=now().replace(hour=13)
):
uri = re.compile(UkTransportSensor.TRANSPORT_API_URL_BASE + "*")
mock_req.get(uri, text=load_fixture("uk_transport_train.json"))
assert await async_setup_component(hass, "sensor", VALID_CONFIG)
await hass.async_block_till_done()
train_state = hass.states.get("sensor.next_train_to_WAT")
assert None is not train_state
assert train_state.name == f"Next train to {TRAIN_DESTINATION_NAME}"
assert train_state.attributes[ATTR_STATION_CODE] == TRAIN_STATION_CODE
assert train_state.attributes[ATTR_CALLING_AT] == TRAIN_DESTINATION_NAME
assert len(train_state.attributes.get(ATTR_NEXT_TRAINS)) == 25
assert (
train_state.attributes[ATTR_NEXT_TRAINS][0]["destination_name"]
== "London Waterloo"
)
assert train_state.attributes[ATTR_NEXT_TRAINS][0]["estimated"] == "06:13"
| apache-2.0 |
jeongchanKim/TizenRT | external/protobuf/python/google/protobuf/reflection.py | 56 | 4562 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = '[email protected] (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import message
if api_implementation.Type() == 'cpp':
from google.protobuf.pyext import cpp_message as message_impl
else:
from google.protobuf.internal import python_message as message_impl
# The type of all Message classes.
# Part of the public interface, but normally only used by message factories.
GeneratedProtocolMessageType = message_impl.GeneratedProtocolMessageType
MESSAGE_CLASS_CACHE = {}
def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg
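# Usage sketch (the descriptor and byte string are assumed to come from an
# earlier serialization step; the names here are illustrative):
#
#     msg = ParseMessage(msg_descriptor, serialized_bytes)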
def MakeClass(descriptor):
"""Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor.
"""
if descriptor in MESSAGE_CLASS_CACHE:
return MESSAGE_CLASS_CACHE[descriptor]
attributes = {}
for name, nested_type in descriptor.nested_types_by_name.items():
attributes[name] = MakeClass(nested_type)
attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
result = GeneratedProtocolMessageType(
str(descriptor.name), (message.Message,), attributes)
MESSAGE_CLASS_CACHE[descriptor] = result
return result
| apache-2.0 |
akarol/cfme_tests | cfme/tests/services/test_pxe_service_catalogs.py | 1 | 5646 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.common.provider import cleanup_vm
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config, get_template_from_config
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils import testgen
from cfme.utils.conf import cfme_data
from cfme.utils.log import logger
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('vm_name', 'uses_infra_providers'),
test_requirements.service,
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=[
['provisioning', 'pxe_server'],
['provisioning', 'pxe_image'],
['provisioning', 'pxe_image_type'],
['provisioning', 'pxe_kickstart'],
['provisioning', 'pxe_template'],
['provisioning', 'datastore'],
['provisioning', 'host'],
['provisioning', 'pxe_root_password'],
['provisioning', 'vlan']
])
pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
pxe_server_names = [pval[0] for pval in pargvalues]
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
if args['provider'].type == "scvmm":
continue
pxe_server_name = args['provider'].data['provisioning']['pxe_server']
if pxe_server_name not in pxe_server_names:
continue
pxe_cust_template = args['provider'].data['provisioning']['pxe_kickstart']
if pxe_cust_template not in cfme_data.get('customization_templates', {}).keys():
continue
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_server_name = provisioning_data['pxe_server']
return get_pxe_server_from_config(pxe_server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_cust_template = provisioning_data['pxe_kickstart']
return get_template_from_config(pxe_cust_template, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
if not pxe_server.exists():
pxe_server.create()
pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
@pytest.yield_fixture(scope="function")
def catalog_item(provider, vm_name, dialog, catalog, provisioning, setup_pxe_servers_vm_prov):
# generate_tests makes sure these have values
pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
pxe_image_type, pxe_vlan = map(
provisioning.get, (
'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image', 'pxe_kickstart',
'pxe_root_password', 'pxe_image_type', 'vlan'
)
)
provisioning_data = {
'catalog': {'provision_type': 'PXE',
'pxe_server': pxe_server,
'pxe_image': {'name': pxe_image},
'vm_name': vm_name,
},
'environment': {'datastore_name': {'name': datastore},
'host_name': {'name': host},
},
'customize': {'root_password': pxe_root_password,
'custom_template': {'name': pxe_kickstart},
},
'network': {'vlan': partial_match(pxe_vlan),
},
}
item_name = fauxfactory.gen_alphanumeric()
catalog_item = CatalogItem(item_type=provider.catalog_name, name=item_name,
description="my catalog", display_in=True, catalog=catalog,
dialog=dialog, catalog_name=pxe_template,
provider=provider, prov_data=provisioning_data)
yield catalog_item
@pytest.mark.rhv1
@pytest.mark.usefixtures('setup_pxe_servers_vm_prov')
def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
"""Tests RHEV PXE service catalog
Metadata:
test_flag: pxe, provision
"""
vm_name = catalog_item.provisioning_data['catalog']["vm_name"]
request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
catalog_item.create()
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
service_catalogs.order()
# nav to requests page happens on successful provision
logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request(num_sec=3600)
msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
assert provision_request.is_succeeded(), msg
| gpl-2.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/test/test_richcmp.py | 55 | 11262 | # Tests for rich comparisons
import unittest
from test import test_support
import operator
class Number:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __cmp__(self, other):
raise test_support.TestFailed, "Number.__cmp__() should not be called"
def __repr__(self):
return "Number(%r)" % (self.x, )
class Vector:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, v):
self.data[i] = v
__hash__ = None # Vectors cannot be hashed
def __nonzero__(self):
raise TypeError, "Vectors cannot be used in Boolean contexts"
def __cmp__(self, other):
raise test_support.TestFailed, "Vector.__cmp__() should not be called"
def __repr__(self):
return "Vector(%r)" % (self.data, )
def __lt__(self, other):
return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
def __le__(self, other):
return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
def __eq__(self, other):
return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
def __ne__(self, other):
return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
def __gt__(self, other):
return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
def __ge__(self, other):
return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
def __cast(self, other):
if isinstance(other, Vector):
other = other.data
if len(self.data) != len(other):
raise ValueError, "Cannot compare vectors of different length"
return other
opmap = {
"lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
"le": (lambda a,b: a<=b, operator.le, operator.__le__),
"eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
"ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
"gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
"ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
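# Each opmap entry bundles three equivalent spellings of one comparison;
# quick self-check (illustrative, runs at import time):
_lt_expr, _lt_func, _lt_dunder = opmap["lt"]
assert _lt_expr(1, 2) == _lt_func(1, 2) == _lt_dunder(1, 2) == True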
class VectorTest(unittest.TestCase):
def checkfail(self, error, opname, *args):
for op in opmap[opname]:
self.assertRaises(error, op, *args)
def checkequal(self, opname, a, b, expres):
for op in opmap[opname]:
realres = op(a, b)
# can't use assertEqual(realres, expres) here
self.assertEqual(len(realres), len(expres))
for i in xrange(len(realres)):
# results are bool, so we can use "is" here
self.assert_(realres[i] is expres[i])
def test_mixed(self):
# check that comparisons involving Vector objects
# which return rich results (i.e. Vectors with itemwise
# comparison results) work
a = Vector(range(2))
b = Vector(range(3))
# all comparisons should fail for different length
for opname in opmap:
self.checkfail(ValueError, opname, a, b)
a = range(5)
b = 5 * [2]
# try mixed arguments (but not (a, b) as that won't return a bool vector)
args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
for (a, b) in args:
self.checkequal("lt", a, b, [True, True, False, False, False])
self.checkequal("le", a, b, [True, True, True, False, False])
self.checkequal("eq", a, b, [False, False, True, False, False])
self.checkequal("ne", a, b, [True, True, False, True, True ])
self.checkequal("gt", a, b, [False, False, False, True, True ])
self.checkequal("ge", a, b, [False, False, True, True, True ])
for ops in opmap.itervalues():
for op in ops:
# calls __nonzero__, which should fail
self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
def test_basic(self):
# Check that comparisons involving Number objects
# give the same results give as comparing the
# corresponding ints
for a in xrange(3):
for b in xrange(3):
for typea in (int, Number):
for typeb in (int, Number):
if typea==typeb==int:
continue # the combination int, int is useless
ta = typea(a)
tb = typeb(b)
for ops in opmap.itervalues():
for op in ops:
realoutcome = op(a, b)
testoutcome = op(ta, tb)
self.assertEqual(realoutcome, testoutcome)
def checkvalue(self, opname, a, b, expres):
for typea in (int, Number):
for typeb in (int, Number):
ta = typea(a)
tb = typeb(b)
for op in opmap[opname]:
realres = op(ta, tb)
realres = getattr(realres, "x", realres)
self.assert_(realres is expres)
def test_values(self):
# check all operators and all comparison results
self.checkvalue("lt", 0, 0, False)
self.checkvalue("le", 0, 0, True )
self.checkvalue("eq", 0, 0, True )
self.checkvalue("ne", 0, 0, False)
self.checkvalue("gt", 0, 0, False)
self.checkvalue("ge", 0, 0, True )
self.checkvalue("lt", 0, 1, True )
self.checkvalue("le", 0, 1, True )
self.checkvalue("eq", 0, 1, False)
self.checkvalue("ne", 0, 1, True )
self.checkvalue("gt", 0, 1, False)
self.checkvalue("ge", 0, 1, False)
self.checkvalue("lt", 1, 0, False)
self.checkvalue("le", 1, 0, False)
self.checkvalue("eq", 1, 0, False)
self.checkvalue("ne", 1, 0, True )
self.checkvalue("gt", 1, 0, True )
self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
def test_misbehavin(self):
class Misb:
def __lt__(self, other): return 0
def __gt__(self, other): return 0
def __eq__(self, other): return 0
def __le__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __ge__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __ne__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __cmp__(self, other): raise RuntimeError, "expected"
a = Misb()
b = Misb()
self.assertEqual(a<b, 0)
self.assertEqual(a==b, 0)
self.assertEqual(a>b, 0)
self.assertRaises(RuntimeError, cmp, a, b)
def test_not(self):
# Check that exceptions in __nonzero__ are properly
# propagated by the not operator
import operator
class Exc(Exception):
pass
class Bad:
def __nonzero__(self):
raise Exc
def do(bad):
not bad
for func in (do, operator.not_):
self.assertRaises(Exc, func, Bad())
def test_recursion(self):
# Check that comparison for recursive objects fails gracefully
from UserList import UserList
a = UserList()
b = UserList()
a.append(b)
b.append(a)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
b.append(17)
# Even recursive lists of different lengths are different,
# but they cannot be ordered
self.assert_(not (a == b))
self.assert_(a != b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
a.append(17)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
a.insert(0, 11)
b.insert(0, 12)
self.assert_(not (a == b))
self.assert_(a != b)
self.assert_(a < b)
class DictTest(unittest.TestCase):
def test_dicts(self):
# Verify that __eq__ and __ne__ work for dicts even if the keys and
# values don't support anything other than __eq__ and __ne__ (and
# __hash__). Complex numbers are a fine example of that.
import random
imag1a = {}
for i in range(50):
imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
items = imag1a.items()
random.shuffle(items)
imag1b = {}
for k, v in items:
imag1b[k] = v
imag2 = imag1b.copy()
imag2[k] = v + 1.0
self.assert_(imag1a == imag1a)
self.assert_(imag1a == imag1b)
self.assert_(imag2 == imag2)
self.assert_(imag1a != imag2)
for opname in ("lt", "le", "gt", "ge"):
for op in opmap[opname]:
self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
def assertIs(self, a, b):
self.assert_(a is b)
def test_coverage(self):
# exercise all comparisons for lists
x = [42]
self.assertIs(x<x, False)
self.assertIs(x<=x, True)
self.assertIs(x==x, True)
self.assertIs(x!=x, False)
self.assertIs(x>x, False)
self.assertIs(x>=x, True)
y = [42, 42]
self.assertIs(x<y, True)
self.assertIs(x<=y, True)
self.assertIs(x==y, False)
self.assertIs(x!=y, True)
self.assertIs(x>y, False)
self.assertIs(x>=y, False)
def test_badentry(self):
# make sure that exceptions for item comparison are properly
# propagated in list comparisons
class Exc(Exception):
pass
class Bad:
def __eq__(self, other):
raise Exc
x = [Bad()]
y = [Bad()]
for op in opmap["eq"]:
self.assertRaises(Exc, op, x, y)
def test_goodentry(self):
# This test exercises the final call to PyObject_RichCompare()
# in Objects/listobject.c::list_richcompare()
class Good:
def __lt__(self, other):
return True
x = [Good()]
y = [Good()]
for op in opmap["lt"]:
self.assertIs(op(x, y), True)
def test_main():
test_support.run_unittest(VectorTest, NumberTest, MiscTest, DictTest, ListTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
obimod/taiga-back | taiga/projects/issues/migrations/0003_auto_20141210_1108.py | 13 | 1037 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import connection
from taiga.projects.userstories.models import *
from taiga.projects.tasks.models import *
from taiga.projects.issues.models import *
from taiga.projects.models import *
def _fix_tags_model(tags_model):
table_name = tags_model._meta.db_table
query = "select id from (select id, unnest(tags) tag from %s) x where tag LIKE '%%,%%'"%(table_name)
cursor = connection.cursor()
cursor.execute(query)
for row in cursor.fetchall():
id = row[0]
instance = tags_model.objects.get(id=id)
instance.tags = [tag.replace(",", "") for tag in instance.tags]
instance.save()
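# Example of the repair above (hypothetical data): a row whose tags array
# was stored as ['bug,', 'ui,front'] is rewritten to ['bug', 'uifront'],
# i.e. every comma inside an element is stripped.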
def fix_tags(apps, schema_editor):
print("Fixing user issue tags")
_fix_tags_model(Issue)
class Migration(migrations.Migration):
dependencies = [
('issues', '0002_issue_external_reference'),
]
operations = [
migrations.RunPython(fix_tags),
]
| agpl-3.0 |
jorik041/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
navcoindev/navcoin-core | qa/rpc-tests/cfund-rawtx-create-proposal.py | 1 | 24704 | #!/usr/bin/env python3
# Copyright (c) 2018 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class CommunityFundRawTXCreateProposalTest(NavCoinTestFramework):
"""Tests the state transition of proposals of the Community fund."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.goodDescription = "these are not the NAV Droids you are looking for"
self.goodDuration = 360000
self.goodAmount = 100
self.goodPropHash = ""
self.goodAddress = ""
def setup_network(self, split=False):
self.all_desc_text_options()
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
self.nodes[0].staking(False)
activate_cfund(self.nodes[0])
# creates a good proposal and sets things we use later
self.test_happy_path()
# test incorrect amounts
self.test_invalid_proposal(self.goodAddress, -100, self.goodDuration, "I should not work")
self.test_invalid_proposal(self.goodAddress, -1, self.goodDuration, "I should not work")
self.test_invalid_proposal(self.goodAddress, 0, self.goodDuration, "I should not work")
self.test_invalid_proposal(self.goodAddress, "", self.goodDuration, "I should not work")
# test incorrect duration
self.test_invalid_proposal(self.goodAddress, self.goodAmount, 0, "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, -13838, "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, True, "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, False, "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, "dsf", "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, "36000", "I should not work")
self.test_invalid_proposal(self.goodAddress, self.goodAmount, "", "I should not work")
# test invalid address
self.test_invalid_proposal("", self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal("a", self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal("1KFHE7w8BhaENAswwryaoccDb6qcT6DbYY", self.goodAmount, self.goodDuration, "I should not work") # bitcoin address
self.test_invalid_proposal("NPyEJsv82GaguVsY3Ur4pu4WwnFCsYQ94g", self.goodAmount, self.goodDuration, "I should not work") # nav address we don't own
self.test_invalid_proposal(False, self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal(True, self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal(8888, self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal(-8888, self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal(0, self.goodAmount, self.goodDuration, "I should not work")
self.test_invalid_proposal(1, self.goodAmount, self.goodDuration, "I should not work")
# test invalid descriptions
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, self.descTxtToLong)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, 800)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, True)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, False)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, -100)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, 0)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, 1)
self.test_invalid_proposal(self.goodAddress, self.goodAmount, self.goodDuration, -1)
self.test_valid_description(self.descTxtWhiteSpace, 2)
self.test_valid_description(self.descTxtMaxLength, 3)
self.test_valid_description(self.descTxtAllCharsAtoB, 4)
self.test_valid_description(self.descTxtAllCharsCtoE, 5)
self.test_valid_description(self.descTxtAllCharsCtoE, 6)
self.test_valid_description(self.descTxtAllCharsFtoG, 7)
self.test_valid_description(self.descTxtAllCharsHtoK, 8)
self.test_valid_description(self.descTxtAllCharsLtoN, 9)
self.test_valid_description(self.descTxtAllCharsOtoP, 10)
self.test_valid_description(self.descTxtAllCharsQtoS, 11)
self.test_valid_description(self.descTxtAllCharsTtoU, 12)
self.test_valid_description(self.descTxtAllCharsVtoZ, 13)
self.test_valid_description(self.descTxtAllCharsArrows1, 14)
self.test_valid_description(self.descTxtAllCharsArrows2, 15)
self.test_valid_description(self.descTxtAllCharsArrows3, 16)
self.test_valid_description(self.descTxtAllCharsClassic1, 17)
self.test_valid_description(self.descTxtAllCharsClassic2, 18)
self.test_valid_description(self.descTxtAllCharsCurrency, 19)
self.test_valid_description(self.descTxtAllCharsShapes1, 20)
self.test_valid_description(self.descTxtAllCharsShapes2, 21)
self.test_valid_description(self.descTxtAllCharsShapes3, 22)
self.test_valid_description(self.descTxtAllCharsShapes4, 23)
self.test_valid_description(self.descTxtAllCharsShapes4, 24)
self.test_valid_description(self.descTxtAllCharsMath1, 25)
self.test_valid_description(self.descTxtAllCharsMath2, 26)
self.test_valid_description(self.descTxtAllCharsMath3, 27)
self.test_valid_description(self.descTxtAllCharsMath4, 28)
self.test_valid_description(self.descTxtAllCharsNumerals1, 29)
self.test_valid_description(self.descTxtAllCharsNumerals2, 30)
self.test_valid_description(self.descTxtAllCharsPunch1, 31)
self.test_valid_description(self.descTxtAllCharsPunch2, 32)
self.test_valid_description(self.descTxtAllCharsSymbol1, 33)
self.test_valid_description(self.descTxtAllCharsSymbol2, 34)
self.test_valid_description(self.descTxtAllCharsSymbol3, 35)
# i = 4
# for char in self.descTxtAllCharsAtoE:
# i = i + 1
# self.test_desc_should_succeed(char, i)
def test_invalid_proposal(self, address, amount, duration, description):
# Create new payment request for more than the amount
proposal = ""
callSucceed = False
try:
proposal = self.send_raw_proposalrequest(address, amount, duration, description)
#print(proposal)
callSucceed = True
except :
pass
assert(proposal == "")
assert(callSucceed is False)
#check a gen - should still only have the last good prop
blocks = slow_gen(self.nodes[0], 1)
proposal_list = self.nodes[0].listproposals()
#should still only have 1 proposal from the good test run
assert(len(proposal_list) == 1)
self.check_good_proposal(proposal_list[0])
def test_valid_description(self, descriptionTxt, proposal_list_len):
duration = 360000
amount = 100
# Create new payment request for more than the amount
propHash = ""
callSucceed = True
#print("Test Description: -------------------------")
#print(descriptionTxt)
try:
propHash = self.send_raw_proposalrequest(self.goodAddress, self.goodAmount, self.goodDuration, descriptionTxt)
#print(propHash)
except Exception as e:
print(e)
callSucceed = False
assert(propHash != "")
assert (callSucceed is True)
# check a gen - should still only have the last good prop
blocks = slow_gen(self.nodes[0], 1)
proposal_list = self.nodes[0].listproposals()
# should still only have the correct amount of proposals from the other runs
assert(len(proposal_list) == proposal_list_len)
# find the proposal we just made and test the description
proposal_found = False
for proposal in proposal_list:
if proposal['hash'] == propHash:
proposal_found = True
assert(proposal['description'] == descriptionTxt)
assert(proposal_found)
# Test everything the way it should be
def test_happy_path(self):
self.goodAddress = self.nodes[0].getnewaddress()
self.goodPropHash = self.send_raw_proposalrequest(self.goodAddress, self.goodAmount, self.goodDuration, self.goodDescription)
blocks = slow_gen(self.nodes[0], 1)
proposal_list = self.nodes[0].listproposals()
# Should only have 1 proposal
assert(len(proposal_list) == 1)
# The proposal should have all the same required fields
assert (proposal_list[0]['blockHash'] == blocks[0])
self.check_good_proposal(proposal_list[0])
def check_good_proposal(self, proposal):
assert (proposal['version'] == 2)
assert (proposal['paymentAddress'] == self.goodAddress)
assert (proposal['proposalDuration'] == self.goodDuration)
assert (proposal['description'] == self.goodDescription)
assert (proposal['votesYes'] == 0)
assert (proposal['votesNo'] == 0)
assert (proposal['status'] == 'pending')
assert (proposal['state'] == 0)
assert (proposal['hash'] == self.goodPropHash)
assert (float(proposal['requestedAmount']) == float(self.goodAmount))
assert (float(proposal['notPaidYet']) == float(self.goodAmount))
assert (float(proposal['userPaidFee']) == float(1000))
def send_raw_proposalrequest(self, address, amount, time, description):
amount = amount * 100000000
# Create a raw proposal tx
raw_proposal_tx = self.nodes[0].createrawtransaction(
[],
{"6ac1": 1000},
json.dumps({"v": 2, "n": amount, "a": address, "d": time, "s": description})
)
# Modify version
raw_proposal_tx = "04" + raw_proposal_tx[2:]
# Fund raw transaction
raw_proposal_tx = self.nodes[0].fundrawtransaction(raw_proposal_tx)['hex']
# Sign raw transaction
raw_proposal_tx = self.nodes[0].signrawtransaction(raw_proposal_tx)['hex']
# Send raw transaction
return self.nodes[0].sendrawtransaction(raw_proposal_tx)
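# Shape of the JSON metadata handed to createrawtransaction above (the
# amount is in satoshi after the * 100000000; field meanings follow this
# function's parameters):
#
#     {"v": 2, "n": 10000000000, "a": "<payment address>", "d": 360000,
#      "s": "<description>"}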
def all_desc_text_options(self):
self.descTxtToLong = "LOOOOONNNNNGGGG, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque semper justo ac neque mollis, a cursus nisl placerat. Aliquam ipsum quam, congue vitae vulputate id, ullamcorper vel libero. Phasellus et tristique justo. Curabitur eu porta magna, vitae auctor libero. Fusce tellus ipsum, aliquet nec consequat ut, dictum eget libero. Maecenas eu velit quam. Nunc ac libero in purus vestibulum feugiat quis nec urna. Donec faucibus consequat dignissim. Donec ornare turpis nec lobortis vestibulum. Vivamus lobortis vel massa ac ultrices. Ut vel eros in elit vehicula luctus vel vitae justo. Praesent quis semper nisi. Vivamus viverra blandit ex. Sed nec fringilla quam. Nulla condimentum rhoncus erat sit amet vulputate. Phasellus viverra sagittis consequat. Sed dapibus augue ac enim dignissim, at consequat arcu ornare. Vestibulum facilisis pretium aliquet. asdfjasdlkfjhadsfkjhasdkjhakjdhfaskjdakjsdhf xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
self.descTxtMaxLength ="IM LOOOOONNNNNGGGG, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque semper justo ac neque mollis, a cursus nisl placerat. Aliquam ipsum quam, congue vitae vulputate id, ullamcorper vel libero. Phasellus et tristique justo. Curabitur eu porta magna, vitae auctor libero. Fusce tellus ipsum, aliquet nec consequat ut, dictum eget libero. Maecenas eu velit quam. Nunc ac libero in purus vestibulum feugiat quis nec urna. Donec faucibus consequat dignissim. Donec ornare turpis nec lobortis vestibulum. Vivamus lobortis vel massa ac ultrices. Ut vel eros in elit vehicula luctus vel vitae justo. Praesent quis semper nisi. Vivamus viverra blandit ex. Sed nec fringilla quam. Nulla condimentum rhoncus erat sit amet vulputate. Phasellus viverra sagittis consequat. Sed dapibus augue ac enim dignissim, at consequat arcu ornare. Vestibulum facilisis pretium aliquet. xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxabc"
self.descTxtAllCharsAtoB = "Ⓐ ⓐ ⒜ A a Ạ ạ Å å Ä ä Ả ả Ḁ ḁ Ấ ấ Ầ ầ Ẩ ẩ Ȃ ȃ Ẫ ẫ Ậ ậ Ắ ắ Ằ ằ Ẳ ẳ Ẵ ẵ Ặ ặ Ā ā Ą ą Ȁ ȁ Ǻ ǻ Ȧ ȧ Á á Ǟ ǟ Ǎ ǎ À à Ã ã Ǡ ǡ Â â Ⱥ ⱥ Æ æ Ǣ ǣ Ǽ ǽ Ɐ Ꜳ ꜳ Ꜹ ꜹ Ꜻ ꜻ Ɑ ℀ ⅍ ℁ ª Ⓑ ⓑ ⒝ B b Ḃ ḃ Ḅ ḅ Ḇ ḇ Ɓ Ƀ ƀ Ƃ ƃ Ƅ ƅ ℬ"
self.descTxtAllCharsCtoE = "Ⓒ ⓒ ⒞ C c Ḉ ḉ Ć ć Ĉ ĉ Ċ ċ Č č Ç ç Ƈ ƈ Ȼ ȼ ℂ ℃ Ɔ Ꜿ ꜿ ℭ ℅ ℆ ℄ Ⓓ ⓓ ⒟ D d Ḋ ḋ Ḍ ḍ Ḏ ḏ Ḑ ḑ Ḓ ḓ Ď ď Ɗ Ƌ ƌ Ɖ Đ đ ȡ DZ Dz dz DŽ Dž dž ȸ ⅅ ⅆ Ⓔ ⓔ ⒠ E e Ḕ ḕ Ḗ ḗ Ḙ ḙ Ḛ ḛ Ḝ ḝ Ẹ ẹ Ẻ ẻ Ế ế Ẽ ẽ Ề ề Ể ể Ễ ễ Ệ ệ Ē ē Ĕ ĕ Ė ė Ę ę Ě ě È è É é Ê ê Ë ë Ȅ ȅ Ȩ ȩ Ȇ ȇ Ǝ ⱻ Ɇ ɇ Ə ǝ ℰ ⱸ ℯ ℮ ℇ Ɛ"
self.descTxtAllCharsFtoG = "Ⓕ ⓕ ⒡ F f Ḟ ḟ Ƒ ƒ ꜰ Ⅎ ⅎ ꟻ ℱ ℻ Ⓖ ⓖ ⒢ G g Ɠ Ḡ ḡ Ĝ ĝ Ğ ğ Ġ ġ Ǥ ǥ Ǧ ǧ Ǵ ℊ ⅁ ǵ Ģ ģ"
self.descTxtAllCharsHtoK = "Ⓗ ⓗ ⒣ H h Ḣ ḣ Ḥ ḥ Ḧ ḧ Ḩ ḩ Ḫ ḫ Ĥ ĥ Ȟ ȟ Ħ ħ Ⱨ ⱨ Ꜧ ℍ Ƕ ẖ ℏ ℎ ℋ ℌ ꜧ Ⓘ ⓘ ⒤ I i Ḭ ḭ Ḯ ḯ IJ ij Í í Ì ì Î î Ï ï Ĩ ĩ Ī ī Ĭ ĭ Į į Ǐ ǐ ı ƚ Ỻ ⅈ ⅉ ℹ ℑ ℐ Ⓙ ⓙ ⒥ J j Ĵ ĵ Ɉ ɉ ȷ ⱼ ǰ Ⓚ ⓚ ⒦ K k Ḱ ḱ Ḳ ḳ Ḵ ḵ Ķ ķ Ƙ ƙ Ꝁ ꝁ Ꝃ ꝃ Ꝅ ꝅ Ǩ ǩ Ⱪ ⱪ ĸ "
self.descTxtAllCharsLtoN = "Ⓛ ⓛ ⒧ L l Ḷ ḷ Ḹ ḹ Ḻ ḻ Ḽ ḽ Ĺ ĺ Ļ ļ Ľ ľ Ŀ ŀ Ł ł Ỉ ỉ Ⱡ ⱡ Ƚ ꝉ Ꝉ Ɫ LJ Lj lj Ị İ ị ꞁ ⅃ ⅂ Ȉ ȉ Ȋ ȋ ℓ ℒ Ⓜ ⓜ ⒨ M m Ḿ ḿ Ṁ ṁ Ṃ ṃ ꟿ ꟽ Ɱ Ɯ ℳ Ⓝ ⓝ ⒩ N n Ṅ ṅ Ṇ ṇ Ṉ ṉ Ṋ ṋ Ń ń Ņ ņ Ň ň Ǹ ǹ Ñ ñ Ƞ ƞ Ŋ ŋ Ɲ ʼn NJ Nj nj ȵ ℕ №"
self.descTxtAllCharsOtoP = "Ⓞ ⓞ ⒪ O o Ö ö Ṏ ṏ Ṍ ṍ Ṑ ṑ Ṓ ṓ Ȫ ȫ Ȭ ȭ Ȯ ȯ Ȱ ȱ Ǫ ǫ Ǭ ǭ Ọ ọ Ỏ ỏ Ố ố Ồ ồ Ổ ổ Ỗ ỗ Ộ ộ Ớ ớ Ờ ờ Ở ở Ỡ ỡ Ợ ợ Ơ ơ Ō ō Ŏ ŏ Ő ő Ò ò Ó ó Ô ô Õ õ Ǒ ǒ Ȍ ȍ Ȏ ȏ Œ œ Ø ø Ǿ ǿ Ꝋ Ꝏ ꝏ ⍥ ⍤ ℴ Ⓟ ⓟ ⒫ ℗ P p Ṕ ṕ Ṗ ṗ Ƥ ƥ Ᵽ ℙ Ƿ "
self.descTxtAllCharsQtoS = "ꟼ ℘ Ⓠ ⓠ ⒬ Q q Ɋ ɋ ℚ ℺ ȹ Ⓡ ⓡ ⒭ R r Ŕ ŕ Ŗ ŗ Ř ř Ṙ ṙ Ṛ ṛ Ṝ ṝ Ṟ ṟ Ȑ ȑ Ȓ ȓ Ɍ ɍ Ʀ Ꝛ ꝛ Ɽ ℞ ℜ ℛ ℟ ℝ Ⓢ ⓢ ⒮ S s Ṡ ṡ Ṣ ṣ Ṥ ṥ Ṧ ṧ Ṩ ṩ Ś ś Ŝ ŝ Ş ş Š š Ș ș ȿ ꜱ Ƨ ƨ Ϩ ϩ ẞ ß ẛ ẜ ẝ ℠"
self.descTxtAllCharsTtoU = "Ⓣ ⓣ ⒯ T t Ṫ ṫ Ṭ ṭ Ṯ ṯ Ṱ ṱ Ţ ţ Ť ť Ŧ ŧ Ț ț Ⱦ ⱦ Ƭ Ʈ ƫ ƭ ẗ ȶ ℡ ™ Ⓤ ⓤ ⒰ U u Ṳ ṳ Ṵ ṵ Ṷ ṷ Ṹ ṹ Ṻ ṻ Ủ ủ Ụ ụ Ứ ứ Ừ ừ Ử ử Ữ ữ Ự ự Ũ ũ Ū ū Ŭ ŭ Ů ů Ű ű Ǚ ǚ Ǘ ǘ Ǜ ǜ Ų ų Ǔ ǔ Ȕ ȕ Û û Ȗ ȗ Ù ù Ú ú Ü ü Ư ư Ʉ Ʋ Ʊ"
self.descTxtAllCharsVtoZ = "Ⓥ ⓥ ⒱ V v Ṽ ṽ Ṿ ṿ Ʌ ℣ Ỽ ⱱ ⱴ ⱽ Ⓦ ⓦ ⒲ W w Ẁ ẁ Ẃ ẃ Ẅ ẅ Ẇ ẇ Ẉ ẉ Ŵ ŵ Ⱳ ⱳ Ϣ ϣ ẘ Ⓧ ⓧ ⒳ X x Ẋ ẋ Ẍ ẍ ℵ × Ⓨ ⓨ ⒴ y Y Ẏ ẏ Ỿ ỿ Ỳ ỳ Ỵ ỵ Ỷ ỷ Ỹ ỹ Ŷ ŷ Ƴ ƴ Ÿ ÿ Ý ý Ɏ ɏ Ȳ ȳ Ɣ ẙ ⅄ ℽ Ⓩ ⓩ ⒵ Z z Ẑ ẑ Ẓ ẓ Ẕ ẕ Ź ź Ż ż Ž ž Ȥ ȥ Ⱬ ⱬ Ƶ ƶ ɀ ℨ ℤ"
self.descTxtAllCharsArrows1 = "↪ ↩ ← ↑ → ↓ ↔ ↕ ↖ ↗ ↘ ↙ ↚ ↛ ↜ ↝ ↞ ↟ ↠ ↡ ↢ ↣ ↤ ↦ ↥ ↧ ↨ ↫ ↬ ↭ ↮ ↯ ↰ ↱ ↲ ↴ ↳ ↵ ↶ ↷ ↸ ↹ ↺ ↻ ⟲ ⟳ ↼ ↽ ↾ ↿ ⇀ ⇁ ⇂ ⇃ ⇄ ⇅ ⇆ ⇇ ⇈ ⇉ ⇊ ⇋ ⇌ ⇍ ⇏ ⇎ ⇑ ⇓ ⇐ ⇒ ⇔ ⇕ ⇖ ⇗ ⇘ ⇙ ⇳ ⇚ ⇛ ⇜ ⇝ ⇞ ⇟ ⇠ ⇡ ⇢ ⇣ ⇤ ⇥ ⇦ ⇨ ⇩ ⇪ ⇧ ⇫ ⇬ ⇭ ⇮ ⇯ ⇰ ⇱ ⇲ ⇴ ⇵ ⇶ ⇷ ⇸ ⇹ ⇺ ⇻ ⇼ ⇽ ⇾ ⇿ ⟰ ⟱ ⟴ ⟵ ⟶ ⟷ ⟸ ⟹ ⟽ ⟾ ⟺ ⟻ ⟼ ⟿"
self.descTxtAllCharsArrows2 = "⤀ ⤁ ⤅ ⤂ ⤃ ⤄ ⤆ ⤇ ⤈ ⤉ ⤊ ⤋ ⤌ ⤍ ⤎ ⤏ ⤐ ⤑ ⤒ ⤓ ⤔ ⤕ ⤖ ⤗ ⤘ ⤙ ⤚ ⤛ ⤜ ⤝ ⤞ ⤟ ⤠ ⤡ ⤢ ⤣ ⤤ ⤥ ⤦ ⤧ ⤨ ⤩ ⤪ ⤭ ⤮ ⤯ ⤰ ⤱ ⤲ ⤳ ⤻ ⤸ ⤾ ⤿ ⤺ ⤼ ⤽ ⤴ ⤵ ⤶ ⤷ ⤹ ⥀ ⥁ ⥂ ⥃ ⥄ ⥅ ⥆ ⥇ ⥈ ⥉ ⥒ ⥓ ⥔ ⥕ ⥖ ⥗ ⥘ ⥙ ⥚ ⥛ ⥜"
self.descTxtAllCharsArrows3 = "⥝ ⥞ ⥟ ⥠ ⥡ ⥢ ⥣ ⥤ ⥥ ⥦ ⥧ ⥨ ⥩ ⥪ ⥫ ⥬ ⥭ ⥮ ⥯ ⥰ ⥱ ⥲ ⥳ ⥴ ⥵ ⥶ ⥷ ⥸ ⥹ ⥺ ⥻ ➔ ➘ ➙ ➚ ➛ ➜ ➝ ➞ ➟ ➠ ➡ ➢ ➣ ➤ ➥ ➦ ➧ ➨ ➩ ➪ ➫ ➬ ➭ ➮ ➯ ➱ ➲ ➳ ➴ ➵ ➶ ➷ ➸ ➹ ➺ ➻ ➼ ➽ ➾ ⬀ ⬁ ⬂ ⬃ ⬄ ⬅ ⬆ ⬇ ⬈ ⬉ ⬊ ⬋ ⬌ ⬍ ⏎ ▲ ▼ ◀ ▶ ⬎ ⬏ ⬐ ⬑ ☇ ☈ ⍃ ⍄ ⍇ ⍈ ⍐ ⍗ ⍌ ⍓ ⍍ ⍔ ⍏ ⍖ ⍅ ⍆"
self.descTxtAllCharsClassic1 = "⌘ « » ‹ › ‘ ’ “ ” „ ‚ ❝ ❞ £ ¥ € $ ¢ ¬ ¶ @ § ® © ™ ° × π ± √ ‰ Ω ∞ ≈ ÷ ~ ≠ ¹ ² ³ ½ ¼ ¾ ‐ – — | ⁄ \ [ ] { } † ‡ … · • ● ⌥ ⌃ ⇧ ↩ ¡ ¿ ‽ ⁂ ∴ ∵ ◊ ※ ← → ↑ ↓ ☜ ☞ ☝ ☟ ✔ ★ ☆ ♺ ☼ ☂ ☺ ☹ ☃ ✉ ✿"
self.descTxtAllCharsClassic2 = "✄ ✈ ✌ ✎ ♠ ♦ ♣ ♥ ♪ ♫ ♯ ♀ ♂ α ß Á á À à Å å Ä ä Æ æ Ç ç É é È è Ê ê Í í Ì ì Î î Ñ ñ Ó ó Ò ò Ô ô Ö ö Ø ø Ú ú Ù ù Ü ü Ž ž"
self.descTxtAllCharsCurrency = "₳ ฿ ¢ ₡ ¢ ₢ ₵ ₫ € £ £ ₤ ₣ ƒ ₲ ₭ ₥ ₦ ₱ $ $ ₮ ₩ ₩ ¥ ¥ ₴ ¤ ₰ ៛ ₪ ₯ ₠ ₧ ₨ ௹ ﷼ ㍐ ৲ ৳ ₹"
self.descTxtAllCharsShapes1 = "▲ ▼ ◀ ▶ ◢ ◣ ◥ ◤ △ ▽ ◿ ◺ ◹ ◸ ▴ ▾ ◂ ▸ ▵ ▿ ◃ ▹ ◁ ▷ ◅ ▻ ◬ ⟁ ⧋ ⧊ ⊿ ∆ ∇ ◭ ◮ ⧩ ⧨ ⌔ ⟐ ◇ ◆ ◈ ⬖ ⬗ ⬘ ⬙ ⬠ ⬡ ⎔ ⋄ ◊ ⧫ ⬢ ⬣ ▰ ▪ ◼ ▮ ◾ ▗ ▖ ■ ∎ ▃ ▄ ▅ ▆ ▇ █ ▌ ▐ ▍ ▎ ▉ ▊ ▋ ❘ ❙ ❚ ▀ ▘ ▝ ▙ ▚ ▛ ▜ ▟ ▞ ░ ▒ ▓ ▂ ▁ ▬ ▔ ▫ ▯ ▭ ▱ ◽ □ ◻ ▢ ⊞ ⊡ ⊟ ⊠ ▣ "
self.descTxtAllCharsShapes2 = "▤ ▥ ▦ ⬚ ▧ ▨ ▩ ⬓ ◧ ⬒ ◨ ◩ ◪ ⬔ ⬕ ❏ ❐ ❑ ❒ ⧈ ◰ ◱ ◳ ◲ ◫ ⧇ ⧅ ⧄ ⍁ ⍂ ⟡ ⧉ ○ ◌ ◍ ◎ ◯ ❍ ◉ ⦾ ⊙ ⦿ ⊜ ⊖ ⊘ ⊚ ⊛ ⊝ ● ⚫ ⦁ ◐ ◑ ◒ ◓ ◔ ◕ ⦶ ⦸ ◵ ◴ ◶ ◷ ⊕ ⊗ ⦇ ⦈ ⦉ ⦊ ❨ ❩ ⸨ ⸩ ◖ ◗ ❪ ❫ ❮ ❯ ❬ ❭ ❰ ❱ ⊏ ⊐ ⊑ ⊒ ◘ ◙ ◚ ◛"
self.descTxtAllCharsShapes3 = "◜ ◝ ◞ ◟ ◠ ◡ ⋒ ⋓ ⋐ ⋑ ⥰ ╰ ╮ ╭ ╯ ⌒ ⥿ ⥾ ⥽ ⥼ ⥊ ⥋ ⥌ ⥍ ⥎ ⥐ ⥑ ⥏ ╳ ✕ ⤫ ⤬ ╱ ╲ ⧸ ⧹ ⌓ ◦ ❖ ✖ ✚ ✜ ⧓ ⧗ ⧑ ⧒ ⧖ _ ⚊ ╴ ╼ ╾ ‐ ⁃ ‑ ‒ - – ⎯ — ― ╶ ╺ ╸ ─ ━ ┄ ┅ ┈ ┉ ╌ ╍ ═ ≣ ≡ ☰ ☱ ☲ ☳ ☴ ☵ ☶ ☷ ╵ ╷ ╹ ╻ │ ▕ ▏ ┃ ┆ ┇ ┊ ╎ ┋ ╿ ╽ ⌞ ⌟ ⌜ ⌝ ⌊ ⌋ ⌈ ⌉ ⌋ ┌ ┍ ┎ ┏ ┐ ┑ ┒ ┓"
self.descTxtAllCharsShapes4 = "└ ┕ ┖ ┗ ┘ ┙ ┚ ┛ ├ ┝ ┞ ┟ ┠ ┡ ┢ ┣ ┤ ┥ ┦ ┧ ┨ ┩ ┪ ┫ ┬ ┭ ┮ ┳ ┴ ┵ ┶ ┷ ┸ ┹ ┺ ┻ ┼ ┽ ┾ ┿ ╀ ╁ ╂ ╃ ╄ ╅ ╆ ╇ ╈ ╉ ╊ ╋ ╏ ║ ╔ ╒ ╓ ╕ ╖ ╗ ╚ ╘ ╙ ╛ ╜ ╝ ╞ ╟ ╠ ╡ ╢ ╣ ╤ ╥ ╦ ╧ ╨ ╩ ╪ ╫ ╬"
self.descTxtAllCharsMath1 = "∞ ⟀ ⟁ ⟂ ⟃ ⟄ ⟇ ⟈ ⟉ ⟊ ⟐ ⟑ ⟒ ⟓ ⟔ ⟕ ⟖ ⟗ ⟘ ⟙ ⟚ ⟛ ⟜ ⟝ ⟞ ⟟ ⟠ ⟡ ⟢ ⟣ ⟤ ⟥ ⟦ ⟧ ⟨ ⟩ ⟪ ⟫ ⦀ ⦁ ⦂ ⦃ ⦄ ⦅ ⦆ ⦇ ⦈ ⦉ ⦊ ⦋ ⦌ ⦍ ⦎ ⦏ ⦐ ⦑ ⦒ ⦓ ⦔ ⦕ ⦖ ⦗ ⦘ ⦙ ⦚ ⦛ ⦜ ⦝ ⦞ ⦟ ⦠ ⦡ ⦢ ⦣ ⦤ ⦥ ⦦ ⦧ ⦨ ⦩ ⦪ ⦫ ⦬ ⦭ ⦮ ⦯ ⦰ ⦱ ⦲ ⦳ ⦴ ⦵ ⦶ ⦷ ⦸ ⦹ ⦺ ⦻ ⦼ ⦽ ⦾ ⦿ ⧀ ⧁ ⧂ ⧃ ⧄ ⧅ ⧆ ⧇ ⧈ ⧉ ⧊ ⧋ ⧌ ⧍ ⧎ ⧏ ⧐ ⧑ ⧒ ⧓ ⧔ ⧕ ⧖ ⧗"
self.descTxtAllCharsMath2 = "⧘ ⧙ ⧚ ⧛ ⧜ ⧝ ⧞ ⧟ ⧡ ⧢ ⧣ ⧤ ⧥ ⧦ ⧧ ⧨ ⧩ ⧪ ⧫ ⧬ ⧭ ⧮ ⧯ ⧰ ⧱ ⧲ ⧳ ⧴ ⧵ ⧶ ⧷ ⧸ ⧹ ⧺ ⧻ ⧼ ⧽ ⧾ ⧿ ∀ ∁ ∂ ∃ ∄ ∅ ∆ ∇ ∈ ∉ ∊ ∋ ∌ ∍ ∎ ∏ ∐ ∑ − ∓ ∔ ∕ ∖ ∗ ∘ ∙ √ ∛ ∜ ∝ ∟ ∠ ∡ ∢ ∣ ∤ ∥ ∦ ∧ ∨ ∩ ∪ ∫ ∬ ∭ ∮ ∯ ∰ ∱ ∲ ∳ ∴ ∵ ∶ ∷ "
self.descTxtAllCharsMath3 = "∸ ∹ ∺ ∻ ∼ ∽ ∾ ∿ ≀ ≁ ≂ ≃ ≄ ≅ ≆ ≇ ≈ ≉ ≊ ≋ ≌ ≍ ≎ ≏ ≐ ≑ ≒ ≓ ≔ ≕ ≖ ≗ ≘ ≙ ≚ ≛ ≜ ≝ ≞ ≟ ≠ ≡ ≢ ≣ ≤ ≥ ≦ ≧ ≨ ≩ ≪ ≫ ≬ ≭ ≮ ≯ ≰ ≱ ≲ ≳ ≴ ≵ ≶ ≷ ≸ ≹ ≺ ≻ ≼ ≽ ≾ ≿ ⊀ ⊁ ⊂ ⊃ ⊄ ⊅ ⊆ ⊇ ⊈ ⊉ ⊊ ⊋ ⊌ ⊍ ⊎ ⊏ ⊐ ⊑ ⊒ ⊓ ⊔ ⊕ ⊖ ⊗ ⊘ ⊙ ⊚ ⊛ ⊜ ⊝ ⊞ ⊟ ⊠ ⊡ ⊢ ⊣ ⊤ ⊥ ⊦ ⊧ ⊨ ⊩ ⊪ ⊫ ⊬ ⊭ ⊮ ⊯ ⊰ ⊱ ⊲ ⊳"
self.descTxtAllCharsMath4 = "⊴ ⊵ ⊶ ⊷ ⊸ ⊹ ⊺ ⊻ ⊼ ⊽ ⊾ ⊿ ⋀ ⋁ ⋂ ⋃ ⋄ ⋅ ⋆ ⋇ ⋈ ⋉ ⋊ ⋋ ⋌ ⋍ ⋎ ⋏ ⋐ ⋑ ⋒ ⋓ ⋔ ⋕ ⋖ ⋗ ⋘ ⋙ ⋚ ⋛ ⋜ ⋝ ⋞ ⋟ ⋠ ⋡ ⋢ ⋣ ⋤ ⋥ ⋦ ⋧ ⋨ ⋩ ⋪ ⋫ ⋬ ⋭ ⋮ ⋯ ⋰ ⋱ ⋲ ⋳ ⋴ ⋵ ⋶ ⋷ ⋸ ⋹ ⋺ ⋻ ⋼ ⋽ ⋾ ⋿ ✕ ✖ ✚"
self.descTxtAllCharsNumerals1 = "⓵ ⓶ ⓷ ⓸ ⓹ ⓺ ⓻ ⓼ ⓽ ⓾ ⒈ ⒉ ⒊ ⒋ ⒌ ⒍ ⒎ ⒏ ⒐ ⒑ ⒒ ⒓ ⒔ ⒕ ⒖ ⒗ ⒘ ⒙ ⒚ ⒛ ⓪ ① ② ③ ④ ⑤ ⑥ ⑦ ⑧ ⑨ ⑩ ➀ ➁ ➂ ➃ ➄ ➅ ➆ ➇ ➈ ➉ ⑪ ⑫ ⑬ ⑭ ⑮ ⑯ ⑰ ⑱ ⑲ ⑳"
self.descTxtAllCharsNumerals2 = "⓿ ❶ ❷ ❸ ❹ ❺ ❻ ❼ ❽ ❾ ❿ ➊ ➋ ➌ ➍ ➎ ➏ ➐ ➑ ➒ ➓ ⓫ ⓬ ⓭ ⓮ ⓯ ⓰ ⓱ ⓲ ⓳ ⓴ ⑴ ⑵ ⑶ ⑷ ⑸ ⑹ ⑺ ⑻ ⑼ ⑽ ⑾ ⑿ ⒀ ⒁ ⒂ ⒃ ⒄ ⒅ ⒆ ⒇ ¹ ² ³ ↉ ½ ⅓ ¼ ⅕ ⅙ ⅐ ⅛ ⅑ ⅒ ⅔ ⅖ ¾ ⅗ ⅜ ⅘ ⅚ ⅝ ⅞"
self.descTxtAllCharsPunch1 = "❝ ❞ ❛ ❜ ‘ ’ ‛ ‚ “ ” „ ‟ « » ‹ › Ꞌ < > @ × ‧ ¨ ․ ꞉ : ⁚ ⁝ ⁞ ‥ … ⁖ ⸪ ⸬ ⸫ ⸭ ⁛ ⁘ ⁙ ⁏ ; ⦂ ⁃ ‐ ‑ ‒ - – ⎯ — ― _ ~ ⁓ ⸛ ⸞ ⸟ ⸯ ¬ / \ ⁄ \ ⁄ | ⎜ ¦ ‖ ‗ "
self.descTxtAllCharsPunch2 = "† ‡ · • ⸰ ° ‣ ⁒ % ‰ ‱ & ⅋ § ÷ + ± = ꞊ ′ ″ ‴ ⁗ ‵ ‶ ‷ ‸ * ⁑ ⁎ ⁕ ※ ⁜ ⁂ ! ‼ ¡ ? ¿ ⸮ ⁇ ⁉ ⁈ ‽ ⸘ ¼ ½ ¾ ² ³ © ® ™ ℠ ℻ ℅ ℁ ⅍ ℄ ¶ ⁋ ❡ ⁌ ⁍ ⸖ ⸗ ⸚ ⸓ ( ) [ ] { } ⸨ ⸩ ❨ ❩ ❪ ❫ ⸦ ⸧ ❬ ❭ ❮ ❯ ❰ ❱ ❴ ❵ ❲ ❳ ⦗ ⦘ ⁅ ⁆ 〈 〉 ⏜ ⏝ ⏞ ⏟ ⸡ ⸠ ⸢ ⸣ ⸤ ⸥ ⎡ ⎤ ⎣ ⎦ ⎨ ⎬ ⌠ ⌡ ⎛ ⎠ ⎝ ⎞ ⁀ ⁔ ‿ ⁐ ‾ ⎟ ⎢ ⎥ ⎪ ꞁ ⎮ ⎧ ⎫ ⎩ ⎭ ⎰ ⎱ '"
self.descTxtAllCharsSymbol1 = "☂ ☔ ✈ ☀ ☼ ☁ ⚡ ⌁ ☇ ☈ ❄ ❅ ❆ ☃ ☉ ☄ ★ ☆ ☽ ☾ ⌛ ⌚ ⌂ ✆ ☎ ☏ ✉ ☑ ✓ ✔ ⎷ ⍻ ✖ ✗ ✘ ☒ ✕ ☓ ☕ ♿ ✌ ☚ ☛ ☜ ☝ ☞ ☟ ☹ ☺ ☻ ☯ ⚘ ☮ ⚰ ⚱ ⚠ ☠ ☢ ⚔ ⚓ ⎈ ⚒ ⚑ ⚐ ☡ ❂ ⚕ ⚖ ⚗ ✇ ☣ ⚙ ☤ ⚚ ⚛ ⚜ ☥ ✝ ☦ ☧ ☨ ☩ † ☪ ☫ ☬ ☭ ✁ ✂ ✃ ✄ ✍"
self.descTxtAllCharsSymbol2 = "✎ ✏ ✐ ✑ ✒ ✙ ✚ ✜ ✛ ♰ ♱ ✞ ✟ ✠ ✡ ☸ ✢ ✣ ✤ ✥ ✦ ✧ ✩ ✪ ✫ ✬ ✭ ✮ ✯ ✰ ✲ ✱ ✳ ✴ ✵ ✶ ✷ ✸ ✹ ✺ ✻ ✼ ✽ ✾ ❀ ✿ ❁ ❃ ❇ ❈ ❉ ❊ ❋ ⁕ ☘ ❦ ❧ ☙ ❢ ❣ ♀ ♂ ⚢ ⚣ ⚤ ⚦ ⚧ ⚨ ⚩ ☿ ♁ ⚯ ♛ ♕ ♚ ♔ ♜ ♖ ♝ ♗ ♞ ♘ ♟ ♙ ☗ ☖ ♠ ♣ ♦ ♥ ❤ ❥ ♡ ♢ ♤ ♧ ⚀ ⚁ ⚂ ⚃ ⚄ ⚅ ⚇ ⚆ ⚈ ⚉ ♨"
self.descTxtAllCharsSymbol3 = "♩ ♪ ♫ ♬ ♭ ♮ ♯ ⌨ ⏏ ⎗ ⎘ ⎙ ⎚ ⌥ ⎇ ⌘ ⌦ ⌫ ⌧ ♲ ♳ ♴ ♵ ♶ ♷ ♸ ♹ ♺ ♻ ♼ ♽ ⁌ ⁍ ⎌ ⌇ ⌲ ⍝ ⍟ ⍣ ⍤ ⍥ ⍨ ⍩ ⎋ ♃ ♄ ♅ ♆ ♇ ♈ ♉ ♊ ♋ ♌ ♍ ♎ ♏ ♐ ♑ ♒ ♓ ⏚ ⏛"
self.descTxtWhiteSpace = '''I
Have
Enters
And white space '''
if __name__ == '__main__':
CommunityFundRawTXCreateProposalTest().main()
| mit |
BaesFr/Sick-Beard | lib/jsonrpclib/jsonrpc.py | 86 | 17140 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================
JSONRPC Library (jsonrpclib)
============================
This library is a JSON-RPC v.2 (proposed) implementation which
follows the xmlrpclib API for portability between clients. It
uses the same Server / ServerProxy, loads, dumps, etc. syntax,
while providing features not present in XML-RPC like:
* Keyword arguments
* Notifications
* Versioning
* Batches and batch notifications
Eventually, I'll add a SimpleXMLRPCServer compatible library,
and other things to tie the thing off nicely. :)
For a quick-start, just open a console and type the following,
replacing the server address, method, and parameters
appropriately.
>>> import jsonrpclib
>>> server = jsonrpclib.Server('http://localhost:8181')
>>> server.add(5, 6)
11
>>> server._notify.add(5, 6)
>>> batch = jsonrpclib.MultiCall(server)
>>> batch.add(3, 50)
>>> batch.add(2, 3)
>>> batch._notify.add(3, 5)
>>> batch()
[53, 5]
See http://code.google.com/p/jsonrpclib/ for more info.
"""
import types
import sys
from xmlrpclib import Transport as XMLTransport
from xmlrpclib import SafeTransport as XMLSafeTransport
from xmlrpclib import ServerProxy as XMLServerProxy
from xmlrpclib import _Method as XML_Method
import time
import string
import random
# Library includes
import lib.jsonrpclib
from lib.jsonrpclib import config
from lib.jsonrpclib import history
# JSON library importing
cjson = None
json = None
try:
import cjson
except ImportError:
try:
import json
except ImportError:
try:
import lib.simplejson as json
except ImportError:
raise ImportError(
'You must have the cjson, json, or simplejson ' +
'module(s) available.'
)
IDCHARS = string.ascii_lowercase+string.digits
class UnixSocketMissing(Exception):
"""
Just a properly named Exception if Unix Sockets usage is
attempted on a platform that doesn't support them (Windows)
"""
pass
#JSON Abstractions
def jdumps(obj, encoding='utf-8'):
# Do 'serialize' test at some point for other classes
global cjson
if cjson:
return cjson.encode(obj)
else:
return json.dumps(obj, encoding=encoding)
def jloads(json_string):
global cjson
if cjson:
return cjson.decode(json_string)
else:
return json.loads(json_string)
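# Round-trip sketch for the two helpers above (works with either backend):
#
#     payload = jdumps({'jsonrpc': '2.0', 'method': 'add', 'params': [5, 6], 'id': 1})
#     assert jloads(payload)['method'] == 'add'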
# XMLRPClib re-implementations
class ProtocolError(Exception):
pass
class TransportMixIn(object):
""" Just extends the XMLRPC transport where necessary. """
user_agent = config.user_agent
# for Python 2.7 support
_connection = None
def send_content(self, connection, request_body):
connection.putheader("Content-Type", "application/json-rpc")
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
connection.send(request_body)
def getparser(self):
target = JSONTarget()
return JSONParser(target), target
class JSONParser(object):
def __init__(self, target):
self.target = target
def feed(self, data):
self.target.feed(data)
def close(self):
pass
class JSONTarget(object):
def __init__(self):
self.data = []
def feed(self, data):
self.data.append(data)
def close(self):
return ''.join(self.data)
class Transport(TransportMixIn, XMLTransport):
pass
class SafeTransport(TransportMixIn, XMLSafeTransport):
pass
from httplib import HTTP, HTTPConnection
from socket import socket
USE_UNIX_SOCKETS = False
try:
from socket import AF_UNIX, SOCK_STREAM
USE_UNIX_SOCKETS = True
except ImportError:
pass
if (USE_UNIX_SOCKETS):
class UnixHTTPConnection(HTTPConnection):
def connect(self):
self.sock = socket(AF_UNIX, SOCK_STREAM)
self.sock.connect(self.host)
class UnixHTTP(HTTP):
_connection_class = UnixHTTPConnection
class UnixTransport(TransportMixIn, XMLTransport):
        def make_connection(self, host):
            host, extra_headers, x509 = self.get_host_info(host)
            return UnixHTTP(host)
class ServerProxy(XMLServerProxy):
"""
Unfortunately, much more of this class has to be copied since
so much of it does the serialization.
"""
def __init__(self, uri, transport=None, encoding=None,
verbose=0, version=None):
import urllib
if not version:
version = config.version
self.__version = version
schema, uri = urllib.splittype(uri)
if schema not in ('http', 'https', 'unix'):
raise IOError('Unsupported JSON-RPC protocol.')
if schema == 'unix':
if not USE_UNIX_SOCKETS:
# Don't like the "generic" Exception...
raise UnixSocketMissing("Unix sockets not available.")
self.__host = uri
self.__handler = '/'
else:
self.__host, self.__handler = urllib.splithost(uri)
            if not self.__handler:
                # Not sure if this is in the JSON spec, but default the
                # handler to the root path.
                self.__handler = '/'
if transport is None:
if schema == 'unix':
transport = UnixTransport()
elif schema == 'https':
transport = SafeTransport()
else:
transport = Transport()
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
def _request(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version)
response = self._run_request(request)
check_for_errors(response)
return response['result']
def _request_notify(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version, notify=True)
response = self._run_request(request, notify=True)
check_for_errors(response)
return
def _run_request(self, request, notify=None):
history.add_request(request)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
# Here, the XMLRPC library translates a single list
# response to the single value -- should we do the
# same, and require a tuple / list to be passed to
# the response object, or expect the Server to be
# outputting the response appropriately?
history.add_response(response)
if not response:
return None
return_obj = loads(response)
return return_obj
def __getattr__(self, name):
# Same as original, just with new _Method reference
return _Method(self._request, name)
@property
def _notify(self):
# Just like __getattr__, but with notify namespace.
return _Notify(self._request_notify)
class _Method(XML_Method):
def __call__(self, *args, **kwargs):
if len(args) > 0 and len(kwargs) > 0:
raise ProtocolError('Cannot use both positional ' +
'and keyword arguments (according to JSON-RPC spec.)')
if len(args) > 0:
return self.__send(self.__name, args)
else:
return self.__send(self.__name, kwargs)
def __getattr__(self, name):
self.__name = '%s.%s' % (self.__name, name)
return self
# The old method returned a new instance, but this seemed wasteful.
# The only thing that changes is the name.
#return _Method(self.__send, "%s.%s" % (self.__name, name))
class _Notify(object):
def __init__(self, request):
self._request = request
def __getattr__(self, name):
return _Method(self._request, name)
# Batch implementation
class MultiCallMethod(object):
def __init__(self, method, notify=False):
self.method = method
self.params = []
self.notify = notify
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ProtocolError('JSON-RPC does not support both ' +
'positional and keyword arguments.')
if len(kwargs) > 0:
self.params = kwargs
else:
self.params = args
def request(self, encoding=None, rpcid=None):
return dumps(self.params, self.method, version=2.0,
encoding=encoding, rpcid=rpcid, notify=self.notify)
def __repr__(self):
return '%s' % self.request()
def __getattr__(self, method):
new_method = '%s.%s' % (self.method, method)
self.method = new_method
return self
class MultiCallNotify(object):
def __init__(self, multicall):
self.multicall = multicall
def __getattr__(self, name):
new_job = MultiCallMethod(name, notify=True)
self.multicall._job_list.append(new_job)
return new_job
class MultiCallIterator(object):
def __init__(self, results):
self.results = results
    def __iter__(self):
        for i in range(0, len(self.results)):
            yield self[i]
def __getitem__(self, i):
item = self.results[i]
check_for_errors(item)
return item['result']
def __len__(self):
return len(self.results)
class MultiCall(object):
def __init__(self, server):
self._server = server
self._job_list = []
def _request(self):
if len(self._job_list) < 1:
# Should we alert? This /is/ pretty obvious.
return
request_body = '[ %s ]' % ','.join([job.request() for
job in self._job_list])
responses = self._server._run_request(request_body)
del self._job_list[:]
if not responses:
responses = []
return MultiCallIterator(responses)
@property
def _notify(self):
return MultiCallNotify(self)
def __getattr__(self, name):
new_job = MultiCallMethod(name)
self._job_list.append(new_job)
return new_job
__call__ = _request
# These lines conform to xmlrpclib's "compatibility" line.
# Not really sure if we should include these, but oh well.
Server = ServerProxy
class Fault(object):
# JSON-RPC error class
def __init__(self, code=-32000, message='Server error', rpcid=None):
self.faultCode = code
self.faultString = message
self.rpcid = rpcid
def error(self):
return {'code':self.faultCode, 'message':self.faultString}
def response(self, rpcid=None, version=None):
if not version:
version = config.version
if rpcid:
self.rpcid = rpcid
return dumps(
self, methodresponse=True, rpcid=self.rpcid, version=version
)
def __repr__(self):
return '<Fault %s: %s>' % (self.faultCode, self.faultString)
def random_id(length=8):
    # Build a short random identifier from lowercase letters and digits.
    return ''.join(random.choice(IDCHARS) for _ in range(length))
class Payload(dict):
def __init__(self, rpcid=None, version=None):
if not version:
version = config.version
self.id = rpcid
self.version = float(version)
def request(self, method, params=[]):
if type(method) not in types.StringTypes:
raise ValueError('Method name must be a string.')
if not self.id:
self.id = random_id()
request = { 'id':self.id, 'method':method }
if params:
request['params'] = params
if self.version >= 2:
request['jsonrpc'] = str(self.version)
return request
def notify(self, method, params=[]):
request = self.request(method, params)
if self.version >= 2:
del request['id']
else:
request['id'] = None
return request
def response(self, result=None):
response = {'result':result, 'id':self.id}
if self.version >= 2:
response['jsonrpc'] = str(self.version)
else:
response['error'] = None
return response
def error(self, code=-32000, message='Server error.'):
error = self.response()
if self.version >= 2:
del error['result']
else:
error['result'] = None
error['error'] = {'code':code, 'message':message}
return error
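# Added illustration: the wire shapes Payload produces for the three
# message kinds. The method name "ping" and rpcid "abc123" are made-up
# values, not part of the original library.
def _example_payload_shapes():
    p = Payload(rpcid='abc123', version=2.0)
    req = p.request('ping', [1, 2])
    # {'id': 'abc123', 'method': 'ping', 'params': [1, 2], 'jsonrpc': '2.0'}
    note = p.notify('ping', [1, 2])
    # same as above but with the 'id' key removed (a 2.0 notification)
    resp = p.response(result=3)
    # {'result': 3, 'id': 'abc123', 'jsonrpc': '2.0'}
    return req, note, resp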
def dumps(params=[], methodname=None, methodresponse=None,
encoding=None, rpcid=None, version=None, notify=None):
"""
This differs from the Python implementation in that it implements
the rpcid argument since the 2.0 spec requires it for responses.
"""
if not version:
version = config.version
valid_params = (types.TupleType, types.ListType, types.DictType)
    if type(methodname) in types.StringTypes and \
            type(params) not in valid_params and \
            not isinstance(params, Fault):
        # If this is a method call and params is neither list-ish nor a
        # Fault, error out.
        raise TypeError('Params must be a dict, list, tuple or Fault ' +
                        'instance.')
# Begin parsing object
payload = Payload(rpcid=rpcid, version=version)
if not encoding:
encoding = 'utf-8'
if type(params) is Fault:
response = payload.error(params.faultCode, params.faultString)
return jdumps(response, encoding=encoding)
if type(methodname) not in types.StringTypes and methodresponse != True:
raise ValueError('Method name must be a string, or methodresponse '+
'must be set to True.')
if config.use_jsonclass == True:
from lib.jsonrpclib import jsonclass
params = jsonclass.dump(params)
if methodresponse is True:
if rpcid is None:
raise ValueError('A method response must have an rpcid.')
response = payload.response(params)
return jdumps(response, encoding=encoding)
request = None
if notify == True:
request = payload.notify(methodname, params)
else:
request = payload.request(methodname, params)
return jdumps(request, encoding=encoding)
def loads(data):
"""
    This differs from the Python implementation, in that it returns
    the request structure as a dict instead of a (method, params) pair.
It will return a list in the case of a batch request / response.
"""
if data == '':
# notification
return None
result = jloads(data)
# if the above raises an error, the implementing server code
# should return something like the following:
# { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
if config.use_jsonclass == True:
from lib.jsonrpclib import jsonclass
result = jsonclass.load(result)
return result
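# Added illustration: a client-side round trip through dumps/loads with
# made-up values ("echo", rpcid "1"). dumps builds the outgoing JSON-RPC
# request text; loads parses a raw response back into a dict, which
# check_for_errors (below) then validates.
def _example_round_trip():
    request = dumps([42], methodname='echo', rpcid='1', version=2.0)
    # -> JSON text like {"jsonrpc": "2.0", "method": "echo",
    #    "params": [42], "id": "1"} (key order may vary)
    response = loads('{"jsonrpc": "2.0", "result": 42, "id": "1"}')
    return request, response['result']  # (..., 42)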
def check_for_errors(result):
if not result:
# Notification
return result
if type(result) is not types.DictType:
raise TypeError('Response is not a dict.')
if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
raise NotImplementedError('JSON-RPC version not yet supported.')
if 'result' not in result.keys() and 'error' not in result.keys():
raise ValueError('Response does not have a result or error key.')
    if 'error' in result.keys() and result['error'] is not None:
code = result['error']['code']
message = result['error']['message']
raise ProtocolError((code, message))
return result
def isbatch(result):
if type(result) not in (types.ListType, types.TupleType):
return False
if len(result) < 1:
return False
if type(result[0]) is not types.DictType:
return False
if 'jsonrpc' not in result[0].keys():
return False
try:
version = float(result[0]['jsonrpc'])
except ValueError:
raise ProtocolError('"jsonrpc" key must be a float(able) value.')
if version < 2:
return False
return True
def isnotification(request):
if 'id' not in request.keys():
# 2.0 notification
return True
    if request['id'] is None:
# 1.0 notification
return True
return False
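# Added illustration: how a server-side dispatcher might use the helpers
# above on already-parsed input (all values are made-up).
#
#   parsed = loads('[{"jsonrpc": "2.0", "method": "a", "id": "1"}]')
#   isbatch(parsed)                              # True
#   isnotification({'method': 'a'})              # True (2.0 -- no 'id' key)
#   isnotification({'method': 'a', 'id': None})  # True (1.0 -- null id)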
| gpl-3.0 |
mitocw/edx-platform | lms/djangoapps/instructor_task/tests/test_api.py | 3 | 15688 | """
Test for LMS instructor background task queue management
"""
import ddt
from celery.states import FAILURE
from mock import MagicMock, Mock, patch
from six.moves import range
from bulk_email.models import SEND_TO_LEARNERS, SEND_TO_MYSELF, SEND_TO_STAFF, CourseEmail
from common.test.utils import normalize_repr
from lms.djangoapps.courseware.tests.factories import UserFactory
from lms.djangoapps.certificates.models import CertificateGenerationHistory, CertificateStatuses
from lms.djangoapps.instructor_task.api import (
SpecificStudentIdMissingError,
generate_certificates_for_students,
get_instructor_task_history,
get_running_instructor_tasks,
regenerate_certificates,
submit_bulk_course_email,
submit_calculate_may_enroll_csv,
submit_calculate_problem_responses_csv,
submit_calculate_students_features_csv,
submit_cohort_students,
submit_course_survey_report,
submit_delete_entrance_exam_state_for_student,
submit_delete_problem_state_for_all_students,
submit_detailed_enrollment_features_csv,
submit_executive_summary_report,
submit_export_ora2_data,
submit_override_score,
submit_rescore_entrance_exam_for_student,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_reset_problem_attempts_in_entrance_exam
)
from lms.djangoapps.instructor_task.api_helper import AlreadyRunningError, QueueConnectionError
from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask
from lms.djangoapps.instructor_task.tasks import export_ora2_data
from lms.djangoapps.instructor_task.tests.test_base import (
TEST_COURSE_KEY,
InstructorTaskCourseTestCase,
InstructorTaskModuleTestCase,
InstructorTaskTestCase,
TestReportMixin
)
from xmodule.modulestore.exceptions import ItemNotFoundError
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests API methods that involve the reporting of status for background tasks.
"""
def test_get_running_instructor_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks
for _ in range(1, 5):
self._create_failure_entry()
self._create_success_entry()
progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_KEY)]
self.assertEqual(set(task_ids), set(progress_task_ids))
def test_get_instructor_task_history(self):
# when fetching historical tasks, we get all tasks, including running tasks
expected_ids = []
for _ in range(1, 5):
expected_ids.append(self._create_failure_entry().task_id)
expected_ids.append(self._create_success_entry().task_id)
expected_ids.append(self._create_progress_entry().task_id)
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(TEST_COURSE_KEY, usage_key=self.problem_url)]
self.assertEqual(set(task_ids), set(expected_ids))
# make the same call using explicit task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_KEY,
usage_key=self.problem_url,
task_type='rescore_problem'
)]
self.assertEqual(set(task_ids), set(expected_ids))
# make the same call using a non-existent task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_KEY,
usage_key=self.problem_url,
task_type='dummy_type'
)]
self.assertEqual(set(task_ids), set())
@ddt.ddt
class InstructorTaskModuleSubmitTest(InstructorTaskModuleTestCase):
"""Tests API methods that involve the submission of module-based background tasks."""
def setUp(self):
super(InstructorTaskModuleSubmitTest, self).setUp()
self.initialize_course()
self.student = UserFactory.create(username="student", email="[email protected]")
self.instructor = UserFactory.create(username="instructor", email="[email protected]")
def test_submit_nonexistent_modules(self):
# confirm that a rescore of a non-existent module returns an exception
problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
request = None
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_student(request, problem_url, self.student)
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_all_students(request, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_reset_problem_attempts_for_all_students(request, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_delete_problem_state_for_all_students(request, problem_url)
def test_submit_nonrescorable_modules(self):
# confirm that a rescore of an existent but unscorable module returns an exception
# (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
# where we are creating real modules.)
problem_url = self.problem_section.location
request = None
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_student(request, problem_url, self.student)
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_all_students(request, problem_url)
@ddt.data(
(normalize_repr(submit_rescore_problem_for_all_students), 'rescore_problem'),
(
normalize_repr(submit_rescore_problem_for_all_students),
'rescore_problem_if_higher',
{'only_if_higher': True}
),
(normalize_repr(submit_rescore_problem_for_student), 'rescore_problem', {'student': True}),
(
normalize_repr(submit_rescore_problem_for_student),
'rescore_problem_if_higher',
{'student': True, 'only_if_higher': True}
),
(normalize_repr(submit_reset_problem_attempts_for_all_students), 'reset_problem_attempts'),
(normalize_repr(submit_delete_problem_state_for_all_students), 'delete_problem_state'),
(normalize_repr(submit_rescore_entrance_exam_for_student), 'rescore_problem', {'student': True}),
(
normalize_repr(submit_rescore_entrance_exam_for_student),
'rescore_problem_if_higher',
{'student': True, 'only_if_higher': True},
),
(normalize_repr(submit_reset_problem_attempts_in_entrance_exam), 'reset_problem_attempts', {'student': True}),
(normalize_repr(submit_delete_entrance_exam_state_for_student), 'delete_problem_state', {'student': True}),
(normalize_repr(submit_override_score), 'override_problem_score', {'student': True, 'score': 0})
)
@ddt.unpack
def test_submit_task(self, task_function, expected_task_type, params=None):
"""
Tests submission of instructor task.
"""
if params is None:
params = {}
if params.get('student'):
params['student'] = self.student
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
# unsuccessful submission, exception raised while submitting.
with patch('lms.djangoapps.instructor_task.tasks_base.BaseInstructorTask.apply_async') as apply_async:
error = Exception()
apply_async.side_effect = error
with self.assertRaises(QueueConnectionError):
instructor_task = task_function(self.create_task_request(self.instructor), location, **params)
most_recent_task = InstructorTask.objects.latest('id')
self.assertEqual(most_recent_task.task_state, FAILURE)
# successful submission
instructor_task = task_function(self.create_task_request(self.instructor), location, **params)
self.assertEqual(instructor_task.task_type, expected_task_type)
# test resubmitting, by updating the existing record:
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
instructor_task.task_state = PROGRESS
instructor_task.save()
with self.assertRaises(AlreadyRunningError):
task_function(self.create_task_request(self.instructor), location, **params)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class InstructorTaskCourseSubmitTest(TestReportMixin, InstructorTaskCourseTestCase):
"""Tests API methods that involve the submission of course-based background tasks."""
def setUp(self):
super(InstructorTaskCourseSubmitTest, self).setUp()
self.initialize_course()
self.student = UserFactory.create(username="student", email="[email protected]")
self.instructor = UserFactory.create(username="instructor", email="[email protected]")
def _define_course_email(self):
"""Create CourseEmail object for testing."""
course_email = CourseEmail.create(
self.course.id,
self.instructor,
[SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_LEARNERS],
"Test Subject",
"<p>This is a test message</p>"
)
return course_email.id
def _test_resubmission(self, api_call):
"""
Tests the resubmission of an instructor task through the API.
The call to the API is a lambda expression passed via
`api_call`. Expects that the API call returns the resulting
InstructorTask object, and that its resubmission raises
`AlreadyRunningError`.
"""
instructor_task = api_call()
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
instructor_task.task_state = PROGRESS
instructor_task.save()
with self.assertRaises(AlreadyRunningError):
api_call()
def test_submit_bulk_email_all(self):
email_id = self._define_course_email()
api_call = lambda: submit_bulk_course_email(
self.create_task_request(self.instructor),
self.course.id,
email_id
)
self._test_resubmission(api_call)
def test_submit_calculate_problem_responses(self):
api_call = lambda: submit_calculate_problem_responses_csv(
self.create_task_request(self.instructor),
self.course.id,
problem_location=''
)
self._test_resubmission(api_call)
def test_submit_calculate_students_features(self):
api_call = lambda: submit_calculate_students_features_csv(
self.create_task_request(self.instructor),
self.course.id,
features=[]
)
self._test_resubmission(api_call)
def test_submit_enrollment_report_features_csv(self):
api_call = lambda: submit_detailed_enrollment_features_csv(self.create_task_request(self.instructor),
self.course.id)
self._test_resubmission(api_call)
def test_submit_executive_summary_report(self):
api_call = lambda: submit_executive_summary_report(
self.create_task_request(self.instructor), self.course.id
)
self._test_resubmission(api_call)
def test_submit_course_survey_report(self):
api_call = lambda: submit_course_survey_report(
self.create_task_request(self.instructor), self.course.id
)
self._test_resubmission(api_call)
def test_submit_calculate_may_enroll(self):
api_call = lambda: submit_calculate_may_enroll_csv(
self.create_task_request(self.instructor),
self.course.id,
features=[]
)
self._test_resubmission(api_call)
def test_submit_cohort_students(self):
api_call = lambda: submit_cohort_students(
self.create_task_request(self.instructor),
self.course.id,
file_name=u'filename.csv'
)
self._test_resubmission(api_call)
def test_submit_ora2_request_task(self):
request = self.create_task_request(self.instructor)
with patch('lms.djangoapps.instructor_task.api.submit_task') as mock_submit_task:
mock_submit_task.return_value = MagicMock()
submit_export_ora2_data(request, self.course.id)
mock_submit_task.assert_called_once_with(
request, 'export_ora2_data', export_ora2_data, self.course.id, {}, '')
def test_submit_generate_certs_students(self):
"""
Tests certificates generation task submission api
"""
api_call = lambda: generate_certificates_for_students(
self.create_task_request(self.instructor),
self.course.id
)
self._test_resubmission(api_call)
def test_regenerate_certificates(self):
"""
Tests certificates regeneration task submission api
"""
def api_call():
"""
wrapper method for regenerate_certificates
"""
return regenerate_certificates(
self.create_task_request(self.instructor),
self.course.id,
[CertificateStatuses.downloadable, CertificateStatuses.generating]
)
self._test_resubmission(api_call)
def test_certificate_generation_no_specific_student_id(self):
"""
Raises ValueError when student_set is 'specific_student' and 'specific_student_id' is None.
"""
with self.assertRaises(SpecificStudentIdMissingError):
generate_certificates_for_students(
self.create_task_request(self.instructor),
self.course.id,
student_set='specific_student',
specific_student_id=None
)
def test_certificate_generation_history(self):
"""
Tests that a new record is added whenever certificate generation/regeneration task is submitted.
"""
instructor_task = generate_certificates_for_students(
self.create_task_request(self.instructor),
self.course.id
)
certificate_generation_history = CertificateGenerationHistory.objects.filter(
course_id=self.course.id,
generated_by=self.instructor,
instructor_task=instructor_task,
is_regeneration=False
)
# Validate that record was added to CertificateGenerationHistory
self.assertTrue(certificate_generation_history.exists())
instructor_task = regenerate_certificates(
self.create_task_request(self.instructor),
self.course.id,
[CertificateStatuses.downloadable, CertificateStatuses.generating]
)
certificate_generation_history = CertificateGenerationHistory.objects.filter(
course_id=self.course.id,
generated_by=self.instructor,
instructor_task=instructor_task,
is_regeneration=True
)
# Validate that record was added to CertificateGenerationHistory
self.assertTrue(certificate_generation_history.exists())
| agpl-3.0 |
zalando/zmon-aws-agent | tests/test_elastigroup.py | 1 | 9681 | from unittest.mock import patch, MagicMock
import boto3
import pytest
from botocore.exceptions import ClientError
from spotinst_sdk import SpotinstClientException
import requests_mock
import zmon_aws_agent
from zmon_aws_agent.elastigroup import Elastigroup, extract_instance_details
def test_get_elastigroup_entities(monkeypatch):
stack_names = MagicMock()
stack_names.return_value = ['foo', 'bar']
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_all_stack_names', stack_names)
elastigroup_resources = MagicMock()
elastigroup_resources.return_value = [Elastigroup('42', 'test', 'acct-id', 'acc-tkn')]
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup_resources', elastigroup_resources)
elastigroup = MagicMock()
elastigroup.return_value = {'name': 'test', 'created_at': 'now',
'compute': {'availability_zones': [{'name': 'az1'}],
'instance_types': ['type1', 'type2'],
'launch_specification': {
'tags': [{'tag_key': 'tag1', 'tag_value': 'value1'}]
}},
'capacity': {'target': 1, 'maximum': 1, 'minimum': 1},
'strategy': {'risk': 100, 'availability_vs_cost': 'balanced'}}
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup', elastigroup)
elastigroup_status = MagicMock()
elastigroup_status.return_value = [{"instance_id": "i-fake", "private_ip": "127.0.0.1"}]
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup_instances', elastigroup_status)
entities = zmon_aws_agent.elastigroup.get_elastigroup_entities('region1', 'acc1')
assert len(entities) == 2
first = entities[0]
assert first['type'] == 'elastigroup'
assert first['risk'] == 100
assert first['orientation'] == 'balanced'
assert first['tag1'] == 'value1' # validate that tags are added
first_instance = first['instances'][0]
assert first_instance['aws_id'] == 'i-fake'
assert first_instance['ip'] == '127.0.0.1'
def test_get_elastigroup_entities_missing_attributes(monkeypatch):
stack_names = MagicMock()
stack_names.return_value = ['foo', 'bar']
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_all_stack_names', stack_names)
elastigroup_resources = MagicMock()
elastigroup_resources.return_value = [Elastigroup('42', 'test', 'acct-id', 'acc-tkn')]
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup_resources', elastigroup_resources)
elastigroup = MagicMock()
elastigroup.return_value = {'id': 'sig-123456'}
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup', elastigroup)
elastigroup_status = MagicMock()
elastigroup_status.return_value = [{"unexpected_key1": "i-fake"}]
monkeypatch.setattr('zmon_aws_agent.elastigroup.get_elastigroup_instances', elastigroup_status)
entities = zmon_aws_agent.elastigroup.get_elastigroup_entities('region1', 'acc1')
assert len(entities) == 2
first = entities[0]
assert first['id'] == 'elastigroup-sig-123456[acc1:region1]'
assert first['type'] == 'elastigroup'
assert first['risk'] == 100
assert first['orientation'] == 'balanced'
first_instance = first['instances'][0]
assert first_instance['aws_id'] == 'missing-instance-id'
assert first_instance['ip'] == 'missing-private-ip'
@pytest.mark.parametrize(
'lsr,gt,err,out',
(
# happy case with 1 stack of the expected type
({'StackResourceSummaries': [
{'LogicalResourceId': 'test', 'PhysicalResourceId': '42', 'ResourceType': 'Custom::elastigroup'}]},
{'TemplateBody': {'Resources': {'test': {'Properties': {'accessToken': 'fake', 'accountId': '12345'}}}}},
None,
[Elastigroup('42', 'test', '12345', 'fake')]),
# resource with other type ignored
({'StackResourceSummaries': [
{'LogicalResourceId': 'test', 'PhysicalResourceId': '42', 'ResourceType': 'Custom::elastigroup'},
{'LogicalResourceId': 'test2', 'PhysicalResourceId': 'id', 'ResourceType': 'Custom::other-stuff'},
]},
{'TemplateBody': {'Resources': {'test': {'Properties': {'accessToken': 'fake', 'accountId': '12345'}}}}},
None,
[Elastigroup('42', 'test', '12345', 'fake')]),
# only resource with other types ignored
({'StackResourceSummaries': [
{'LogicalResourceId': 'test', 'PhysicalResourceId': '42', 'ResourceType': 'Custom::foo'},
{'LogicalResourceId': 'test2', 'PhysicalResourceId': 'id', 'ResourceType': 'Custom::bar'},
]},
None,
None,
[]),
# boto error
(None,
None,
ClientError({'Error': {'Code': '500', 'Message': 'Somebody Set Us Up The Bomb'}}, "dont-care"),
[]),
)
)
def test_get_elastigroup_resources(lsr, gt, err, out):
def mock_make_api_call(self, operation_name, kwarg):
if err:
raise err
if operation_name == 'ListStackResources':
return lsr
elif operation_name == 'GetTemplate':
return gt
raise ValueError(operation_name + ' not expected')
with patch('botocore.client.BaseClient._make_api_call', new=mock_make_api_call):
cf = boto3.client('cloudformation', region_name='eu-central-1')
resources = zmon_aws_agent.elastigroup.get_elastigroup_resources(cf, 'dontcare')
# assert all([a == b for a, b in zip(resources, out)])
assert resources == out
@pytest.mark.parametrize(
'resp,err,out',
(
({'StackSummaries': [{'StackName': 'foo'}]}, None, ['foo']),
({'StackSummaries': [{'StackName': 'foo'}, {'StackName': 'bar'}]}, None, ['foo', 'bar']),
(None, ClientError({'Error': {'Code': '500', 'Message': 'Somebody Set Us Up The Bomb'}}, "dont-care"), []),
)
)
def test_get_all_stack_names(resp, err, out):
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == 'ListStacks':
if err:
raise err
return resp
raise ValueError(operation_name + ' not expected')
with patch('botocore.client.BaseClient._make_api_call', new=mock_make_api_call):
cf = boto3.client('cloudformation', region_name='eu-central-1')
assert zmon_aws_agent.elastigroup.get_all_stack_names(cf) == out
@pytest.mark.parametrize(
'data,err,result,expected',
(
(Elastigroup("42", "name", "1234", "fake"), None, {"id": "42", "foo": "bar"}, {"id": "42", "foo": "bar"}),
(Elastigroup("42", "name", "1234", "fake"), SpotinstClientException("test", "fake"), None, None),
)
)
def test_get_elastigroup(data, err, result, expected):
with patch('spotinst_sdk.SpotinstClient.get_elastigroup') as elastigroup_mock:
elastigroup_mock.return_value = result
elastigroup_mock.side_effect = err
got = zmon_aws_agent.elastigroup.get_elastigroup(data)
assert got == expected
@pytest.mark.parametrize(
'data,err,result,expected',
(
(Elastigroup("42", "name", "12345", "fake"), None, [{"foo": "bar"}], [{"foo": "bar"}]),
(Elastigroup("42", "name", "12345", "fake"), SpotinstClientException("test", "fake"), None, []),
)
)
def test_get_elastigroup_instances(data, err, result, expected):
with patch('spotinst_sdk.SpotinstClient.get_elastigroup_active_instances') as elastigroup_status_mock:
elastigroup_status_mock.return_value = result
elastigroup_status_mock.side_effect = err
got = zmon_aws_agent.elastigroup.get_elastigroup_instances(data)
assert got == expected
def test_extract_instance_details():
# Example from https://api.spotinst.com/spotinst-api/elastigroup/amazon-web-services/status/
resp = '{"request":{"id":"890a90c2-5264-482b-a72b-e021557227e4","url":"/aws/ec2/group/sig-12345678/status",' \
'"method":"GET","timestamp":"2018-06-25T11:51:42.629Z"},"response":{"status":{"code":200,"message":"OK"},' \
'"kind":"spotinst:aws:ec2:group","items":[{"spotInstanceRequestId":"sir-3thgagpn",' \
'"instanceId":"i-0cc289f12538e4758","instanceType":"t2.micro","product":"Linux/UNIX",' \
'"groupId":"sig-12345678","availabilityZone":"us-west-2a","privateIp":"172.31.28.210",' \
'"createdAt":"2018-06-25T11:49:00.000Z","publicIp":"10.10.10.10","status":"fulfilled"},' \
'{"spotInstanceRequestId":null,"instanceId":"i-05ebb28abebdc718b","instanceType":"t2.medium",' \
'"product":"Linux/UNIX","groupId":"sig-05417358","availabilityZone":"us-west-2a",' \
'"privateIp":"172.31.17.189","createdAt":"2018-06-25T11:49:02.000Z","publicIp":"10.10.10.10",' \
'"status":"running"}],"count":2}}'
with requests_mock.Mocker() as m:
m.get("https://api.spotinst.io/aws/ec2/group/42/status", text=resp)
got = zmon_aws_agent.elastigroup.get_elastigroup_instances(Elastigroup("42", "name", "12345", "fake"))
assert len(got) == 2
inst1 = extract_instance_details(got[0])
assert inst1['type'] == 't2.micro'
assert inst1['spot']
assert inst1['availability_zone'] == 'us-west-2a'
inst2 = extract_instance_details(got[1])
assert inst2['type'] == 't2.medium'
assert inst2['spot'] is False
assert inst2['availability_zone'] == 'us-west-2a'
| apache-2.0 |
coolbombom/CouchPotatoServer | libs/requests/packages/urllib3/contrib/ntlmpool.py | 59 | 4740 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
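# Added usage sketch (host, credentials and paths are placeholders, not
# part of the original module): the pool authenticates once against
# `authurl` during the NTLM handshake and keeps that socket open for the
# requests that follow.
#
#   pool = NTLMConnectionPool(user='EXAMPLE\\jdoe', pw='secret',
#                             authurl='/protected', host='intranet.local')
#   response = pool.urlopen('GET', '/protected/page')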
| gpl-3.0 |
AunShiLord/sympy | sympy/utilities/tests/test_autowrap.py | 54 | 6665 | # Tests that require installed backends go into
# sympy/test_external/test_autowrap
import os
import tempfile
import shutil
from sympy.utilities.autowrap import (autowrap, binary_function,
CythonCodeWrapper, ufuncify, UfuncifyCodeWrapper, CodeWrapper)
from sympy.utilities.codegen import (CCodeGen, CodeGenArgumentListError,
make_routine)
from sympy.utilities.pytest import raises
from sympy.core import symbols, Eq
from sympy.core.compatibility import StringIO
def get_string(dump_fn, routines, prefix="file"):
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and
this wrapper returns the contents of that stream as a string. This
auxiliary function is used by many tests below.
    The header and the empty lines are not generated, to facilitate
    testing of the output.
"""
output = StringIO()
dump_fn(routines, output, prefix)
source = output.getvalue()
output.close()
return source
def test_cython_wrapper_scalar_function():
x, y, z = symbols('x,y,z')
expr = (x + y)*z
routine = make_routine("test", expr)
code_gen = CythonCodeWrapper(CCodeGen())
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" double test(double x, double y, double z)\n"
"\n"
"def test_c(double x, double y, double z):\n"
"\n"
" return test(x, y, z)")
assert source == expected
def test_cython_wrapper_outarg():
from sympy import Equality
x, y, z = symbols('x,y,z')
code_gen = CythonCodeWrapper(CCodeGen())
routine = make_routine("test", Equality(z, x + y))
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" void test(double x, double y, double *z)\n"
"\n"
"def test_c(double x, double y):\n"
"\n"
" cdef double z = 0\n"
" test(x, y, &z)\n"
" return z")
assert source == expected
def test_cython_wrapper_inoutarg():
from sympy import Equality
x, y, z = symbols('x,y,z')
code_gen = CythonCodeWrapper(CCodeGen())
routine = make_routine("test", Equality(z, x + y + z))
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" void test(double x, double y, double *z)\n"
"\n"
"def test_c(double x, double y, double z):\n"
"\n"
" test(x, y, &z)\n"
" return z")
assert source == expected
def test_autowrap_dummy():
x, y, z = symbols('x y z')
# Uses DummyWrapper to test that codegen works as expected
f = autowrap(x + y, backend='dummy')
assert f() == str(x + y)
assert f.args == "x, y"
assert f.returns == "nameless"
f = autowrap(Eq(z, x + y), backend='dummy')
assert f() == str(x + y)
assert f.args == "x, y"
assert f.returns == "z"
f = autowrap(Eq(z, x + y + z), backend='dummy')
assert f() == str(x + y + z)
assert f.args == "x, y, z"
assert f.returns == "z"
def test_autowrap_args():
x, y, z = symbols('x y z')
raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y),
backend='dummy', args=[x]))
f = autowrap(Eq(z, x + y), backend='dummy', args=[y, x])
assert f() == str(x + y)
assert f.args == "y, x"
assert f.returns == "z"
raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y + z),
backend='dummy', args=[x, y]))
f = autowrap(Eq(z, x + y + z), backend='dummy', args=[y, x, z])
assert f() == str(x + y + z)
assert f.args == "y, x, z"
assert f.returns == "z"
def test_autowrap_store_files():
x, y = symbols('x y')
tmp = tempfile.mkdtemp()
try:
f = autowrap(x + y, backend='dummy', tempdir=tmp)
assert f() == str(x + y)
assert os.access(tmp, os.F_OK)
finally:
shutil.rmtree(tmp)
def test_binary_function():
x, y = symbols('x y')
f = binary_function('f', x + y, backend='dummy')
assert f._imp_() == str(x + y)
def test_ufuncify_source():
x, y, z = symbols('x,y,z')
code_wrapper = UfuncifyCodeWrapper(CCodeGen("ufuncify"))
CodeWrapper._module_counter = 0
routine = make_routine("test", x + y + z)
source = get_string(code_wrapper.dump_c, [routine])
expected = """\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include "file.h"
static PyMethodDef wrapper_module_0Methods[] = {
{NULL, NULL, 0, NULL}
};
static void test_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
char *in0 = args[0];
char *in1 = args[1];
char *in2 = args[2];
char *out1 = args[3];
npy_intp in0_step = steps[0];
npy_intp in1_step = steps[1];
npy_intp in2_step = steps[2];
npy_intp out1_step = steps[3];
for (i = 0; i < n; i++) {
*((double *)out1) = test(*(double *)in0, *(double *)in1, *(double *)in2);
in0 += in0_step;
in1 += in1_step;
in2 += in2_step;
out1 += out1_step;
}
}
PyUFuncGenericFunction test_funcs[1] = {&test_ufunc};
static char test_types[4] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
static void *test_data[1] = {NULL};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"wrapper_module_0",
NULL,
-1,
wrapper_module_0Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_wrapper_module_0(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1,
PyUFunc_None, "wrapper_module_0", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "test", ufunc0);
Py_DECREF(ufunc0);
return m;
}
#else
PyMODINIT_FUNC initwrapper_module_0(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = Py_InitModule("wrapper_module_0", wrapper_module_0Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1,
PyUFunc_None, "wrapper_module_0", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "test", ufunc0);
Py_DECREF(ufunc0);
}
#endif"""
assert source == expected
| bsd-3-clause |
deepmind/dm_control | dm_control/mujoco/wrapper/core.py | 1 | 34339 | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Main user-facing classes and utility functions for loading MuJoCo models."""
import contextlib
import ctypes
import os
import threading
import weakref
from absl import logging
from dm_control.mujoco.wrapper import util
from dm_control.mujoco.wrapper.mjbindings import constants
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.mujoco.wrapper.mjbindings import functions
from dm_control.mujoco.wrapper.mjbindings import mjlib
from dm_control.mujoco.wrapper.mjbindings import types
from dm_control.mujoco.wrapper.mjbindings import wrappers
import numpy as np
# Internal analytics import.
# Unused internal import: resources.
_NULL = b"\00"
_FAKE_XML_FILENAME = b"model.xml"
_FAKE_BINARY_FILENAME = b"model.mjb"
# Although `mjMAXVFSNAME` from `mjmodel.h` specifies a limit of 100 bytes
# (including the terminal null byte), the actual limit seems to be 99 bytes
# (98 characters).
_MAX_VFS_FILENAME_CHARACTERS = 98
_VFS_FILENAME_TOO_LONG = (
"Filename length {length} exceeds {limit} character limit: {filename}")
_INVALID_FONT_SCALE = ("`font_scale` must be one of {}, got {{}}."
.format(enums.mjtFontScale))
_CONTACT_ID_OUT_OF_RANGE = (
"`contact_id` must be between 0 and {max_valid} (inclusive), got: {actual}."
)
# Global cache used to store finalizers for freeing ctypes pointers.
# Contains {pointer_address: weakref_object} pairs.
_FINALIZERS = {}
class Error(Exception):
"""Base class for MuJoCo exceptions."""
pass
if constants.mjVERSION_HEADER != mjlib.mj_version():
raise Error("MuJoCo library version ({0}) does not match header version "
"({1})".format(constants.mjVERSION_HEADER, mjlib.mj_version()))
_REGISTERED = False
_REGISTRATION_LOCK = threading.Lock()
_ERROR_BUFSIZE = 1000
# This is used to keep track of the `MJMODEL` pointer that was most recently
# loaded by `_get_model_ptr_from_xml`. Only this model can be saved to XML.
_LAST_PARSED_MODEL_PTR = None
_NOT_LAST_PARSED_ERROR = (
"Only the model that was most recently loaded from an XML file or string "
"can be saved to an XML file.")
import time
# NB: Python functions that are called from C are defined at module-level to
# ensure they won't be garbage-collected before they are called.
@ctypes.CFUNCTYPE(None, ctypes.c_char_p)
def _warning_callback(message):
logging.warning(util.to_native_string(message))
@ctypes.CFUNCTYPE(None, ctypes.c_char_p)
def _error_callback(message):
logging.fatal(util.to_native_string(message))
# Override MuJoCo's callbacks for handling warnings and errors.
mjlib.mju_user_warning = ctypes.c_void_p.in_dll(mjlib, "mju_user_warning")
mjlib.mju_user_error = ctypes.c_void_p.in_dll(mjlib, "mju_user_error")
mjlib.mju_user_warning.value = ctypes.cast(
_warning_callback, ctypes.c_void_p).value
mjlib.mju_user_error.value = ctypes.cast(
_error_callback, ctypes.c_void_p).value
def enable_timer(enabled=True):
if enabled:
set_callback("mjcb_time", time.time)
else:
set_callback("mjcb_time", None)
def _maybe_register_license(path=None):
"""Registers the MuJoCo license if not already registered.
Args:
path: Optional custom path to license key file.
Raises:
Error: If the license could not be registered.
"""
with _REGISTRATION_LOCK:
global _REGISTERED
if not _REGISTERED:
if path is None:
path = util.get_mjkey_path()
# TODO(b/176220357): Repeatedly activating a trial license results in
# errors (for example this could happen if
# `mj_activate` was already called by another library
# within the same process). To avoid such errors we
# unconditionally deactivate any active licenses before
# calling `mj_activate`.
mjlib.mj_deactivate()
result = mjlib.mj_activate(util.to_binary_string(path))
if result == 1:
_REGISTERED = True
# Internal analytics of mj_activate.
elif result == 0:
raise Error("Could not register license.")
else:
raise Error("Unknown registration error (code: {})".format(result))
def _str2type(type_str):
type_id = mjlib.mju_str2Type(util.to_binary_string(type_str))
if not type_id:
raise Error("{!r} is not a valid object type name.".format(type_str))
return type_id
def _type2str(type_id):
type_str_ptr = mjlib.mju_type2Str(type_id)
if not type_str_ptr:
raise Error("{!r} is not a valid object type ID.".format(type_id))
return ctypes.string_at(type_str_ptr)
def set_callback(name, new_callback=None):
"""Sets a user-defined callback function to modify MuJoCo's behavior.
Callback functions should have the following signature:
func(const_mjmodel_ptr, mjdata_ptr) -> None
Args:
name: Name of the callback to set. Must be a field in
`functions.function_pointers`.
new_callback: The new callback. This can be one of the following:
* A Python callable
* A C function exposed by a `ctypes.CDLL` object
* An integer specifying the address of a callback function
* None, in which case any existing callback of that name is removed
"""
setattr(functions.callbacks, name, new_callback)
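# Added sketch: installing and removing a Python callback with the
# documented signature. "mjcb_control" is MuJoCo's control callback slot;
# the callback body here is a placeholder.
#
#   def zero_ctrl(const_mjmodel_ptr, mjdata_ptr):
#     pass  # e.g. write actuator commands into mjdata_ptr.contents.ctrl
#
#   set_callback("mjcb_control", zero_ctrl)   # install
#   set_callback("mjcb_control", None)        # remove again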
@contextlib.contextmanager
def callback_context(name, new_callback=None):
"""Context manager that temporarily overrides a MuJoCo callback function.
On exit, the callback will be restored to its original value (None if the
callback was not already overridden when the context was entered).
Args:
name: Name of the callback to set. Must be a field in
`mjbindings.function_pointers`.
new_callback: The new callback. This can be one of the following:
* A Python callable
* A C function exposed by a `ctypes.CDLL` object
* An integer specifying the address of a callback function
* None, in which case any existing callback of that name is removed
Yields:
None
"""
old_callback = getattr(functions.callbacks, name)
set_callback(name, new_callback)
try:
yield
finally:
# Ensure that the callback is reset on exit, even if an exception is raised.
set_callback(name, old_callback)
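# Added sketch: the scoped equivalent of the example above -- the callback
# is active only inside the `with` block and restored afterwards.
#
#   with callback_context("mjcb_control", zero_ctrl):
#     ...  # step the simulation with the callback installed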
def get_schema():
"""Returns a string containing the schema used by the MuJoCo XML parser."""
buf = ctypes.create_string_buffer(100000)
mjlib.mj_printSchema(None, buf, len(buf), 0, 0)
return buf.value
@contextlib.contextmanager
def _temporary_vfs(filenames_and_contents):
"""Creates a temporary VFS containing one or more files.
Args:
filenames_and_contents: A dict containing `{filename: contents}` pairs.
The length of each filename must not exceed 98 characters.
Yields:
A `types.MJVFS` instance.
Raises:
Error: If a file cannot be added to the VFS, or if an error occurs when
looking up the filename.
ValueError: If the length of a filename exceeds 98 characters.
"""
vfs = types.MJVFS()
mjlib.mj_defaultVFS(vfs)
for filename, contents in filenames_and_contents.items():
if len(filename) > _MAX_VFS_FILENAME_CHARACTERS:
raise ValueError(
_VFS_FILENAME_TOO_LONG.format(
length=len(filename),
limit=_MAX_VFS_FILENAME_CHARACTERS,
filename=filename))
filename = util.to_binary_string(filename)
contents = util.to_binary_string(contents)
_, extension = os.path.splitext(filename)
# For XML files we need to append a NULL byte, otherwise MuJoCo's parser
# can sometimes read past the end of the string. However, we should *not*
# do this for other file types (in particular for STL meshes, where this
# causes MuJoCo's compiler to complain that the file size is incorrect).
append_null = extension.lower() == b".xml"
num_bytes = len(contents) + append_null
retcode = mjlib.mj_makeEmptyFileVFS(vfs, filename, num_bytes)
if retcode == 1:
raise Error("Failed to create {!r}: VFS is full.".format(filename))
elif retcode == 2:
raise Error("Failed to create {!r}: duplicate filename.".format(filename))
file_index = mjlib.mj_findFileVFS(vfs, filename)
if file_index == -1:
raise Error("Could not find {!r} in the VFS".format(filename))
vf = vfs.filedata[file_index]
vf_as_char_arr = ctypes.cast(vf, ctypes.POINTER(ctypes.c_char * num_bytes))
vf_as_char_arr.contents[:len(contents)] = contents
if append_null:
vf_as_char_arr.contents[-1] = _NULL
try:
yield vfs
finally:
mjlib.mj_deleteVFS(vfs) # Ensure that we free the VFS afterwards.
def _create_finalizer(ptr, free_func):
"""Creates a finalizer for a ctypes pointer.
Args:
ptr: A `ctypes.POINTER` to be freed.
free_func: A callable that frees the pointer. It will be called with `ptr`
as its only argument when `ptr` is garbage collected.
"""
ptr_type = type(ptr)
address = ctypes.addressof(ptr.contents)
if address not in _FINALIZERS: # Only one finalizer needed per address.
logging.debug("Allocated %s at %x", ptr_type.__name__, address)
def callback(dead_ptr_ref):
"""A weakref callback that frees the resource held by a pointer."""
del dead_ptr_ref # Unused weakref to the dead ctypes pointer object.
if address not in _FINALIZERS:
# Someone had already explicitly called `call_finalizer_for_pointer`.
return
else:
        if ctypes.cast is None:
          # `ctypes.cast` might be None if the interpreter is in the process
          # of exiting. In this case it doesn't really matter whether or not
          # we explicitly free the pointer, since any remaining pointers will
          # be freed anyway when the process terminates. We bail out silently
          # in order to avoid logging an unsightly (but harmless) error.
          return
        # Turn the address back into a pointer to be freed.
        temp_ptr = ctypes.cast(address, ptr_type)
free_func(temp_ptr)
logging.debug("Freed %s at %x", ptr_type.__name__, address)
del _FINALIZERS[address] # Remove the weakref from the global cache.
# Store weakrefs in a global cache so that they don't get garbage collected
# before their referents.
_FINALIZERS[address] = (weakref.ref(ptr, callback), callback)
def _finalize(ptr):
"""Calls the finalizer for the specified pointer to free allocated memory."""
address = ctypes.addressof(ptr.contents)
try:
ptr_ref, callback = _FINALIZERS[address]
callback(ptr_ref)
except KeyError:
pass
def _load_xml(filename, vfs_or_none):
"""Invokes `mj_loadXML` with logging/error handling."""
error_buf = ctypes.create_string_buffer(_ERROR_BUFSIZE)
model_ptr = mjlib.mj_loadXML(
util.to_binary_string(filename),
vfs_or_none,
error_buf,
_ERROR_BUFSIZE)
if not model_ptr:
raise Error(util.to_native_string(error_buf.value))
elif error_buf.value:
logging.warning(util.to_native_string(error_buf.value))
# Free resources when the ctypes pointer is garbage collected.
_create_finalizer(model_ptr, mjlib.mj_deleteModel)
return model_ptr
def _get_model_ptr_from_xml(xml_path=None, xml_string=None, assets=None):
"""Parses a model XML file, compiles it, and returns a pointer to an mjModel.
Args:
xml_path: None or a path to a model XML file in MJCF or URDF format.
xml_string: None or an XML string containing an MJCF or URDF model
description.
assets: None or a dict containing external assets referenced by the model
(such as additional XML files, textures, meshes etc.), in the form of
`{filename: contents_string}` pairs. The keys should correspond to the
filenames specified in the model XML. Ignored if `xml_string` is None.
One of `xml_path` or `xml_string` must be specified.
Returns:
A `ctypes.POINTER` to a new `mjbindings.types.MJMODEL` instance.
Raises:
TypeError: If both or neither of `xml_path` and `xml_string` are specified.
Error: If the model is not created successfully.
"""
if xml_path is None and xml_string is None:
raise TypeError(
"At least one of `xml_path` or `xml_string` must be specified.")
elif xml_path is not None and xml_string is not None:
raise TypeError(
"Only one of `xml_path` or `xml_string` may be specified.")
_maybe_register_license()
if xml_string is not None:
assets = {} if assets is None else assets.copy()
# Ensure that the fake XML filename doesn't overwrite an existing asset.
xml_path = _FAKE_XML_FILENAME
while xml_path in assets:
xml_path = "_" + xml_path
assets[xml_path] = xml_string
with _temporary_vfs(assets) as vfs:
ptr = _load_xml(xml_path, vfs)
else:
ptr = _load_xml(xml_path, None)
global _LAST_PARSED_MODEL_PTR
_LAST_PARSED_MODEL_PTR = ptr
return ptr
def save_last_parsed_model_to_xml(xml_path, check_model=None):
"""Writes a description of the most recently loaded model to an MJCF XML file.
Args:
xml_path: Path to the output XML file.
check_model: Optional `MjModel` instance. If specified, this model will be
checked to see if it is the most recently parsed one, and a ValueError
will be raised otherwise.
Raises:
Error: If MuJoCo encounters an error while writing the XML file.
ValueError: If `check_model` was passed, and this model is not the most
recently parsed one.
"""
if check_model and check_model.ptr is not _LAST_PARSED_MODEL_PTR:
raise ValueError(_NOT_LAST_PARSED_ERROR)
error_buf = ctypes.create_string_buffer(_ERROR_BUFSIZE)
mjlib.mj_saveLastXML(util.to_binary_string(xml_path),
_LAST_PARSED_MODEL_PTR,
error_buf,
_ERROR_BUFSIZE)
if error_buf.value:
raise Error(error_buf.value)
def _get_model_ptr_from_binary(binary_path=None, byte_string=None):
"""Returns a pointer to an mjModel from the contents of a MuJoCo model binary.
Args:
binary_path: Path to an MJB file (as produced by MjModel.save_binary).
byte_string: String of bytes (as returned by MjModel.to_bytes).
One of `binary_path` or `byte_string` must be specified.
Returns:
A `ctypes.POINTER` to a new `mjbindings.types.MJMODEL` instance.
Raises:
TypeError: If both or neither of `byte_string` and `binary_path`
are specified.
"""
if binary_path is None and byte_string is None:
raise TypeError(
"At least one of `byte_string` or `binary_path` must be specified.")
elif binary_path is not None and byte_string is not None:
raise TypeError(
"Only one of `byte_string` or `binary_path` may be specified.")
_maybe_register_license()
if byte_string is not None:
with _temporary_vfs({_FAKE_BINARY_FILENAME: byte_string}) as vfs:
ptr = mjlib.mj_loadModel(_FAKE_BINARY_FILENAME, vfs)
else:
ptr = mjlib.mj_loadModel(util.to_binary_string(binary_path), None)
# Free resources when the ctypes pointer is garbage collected.
_create_finalizer(ptr, mjlib.mj_deleteModel)
return ptr
# Subclasses implementing constructors/destructors for low-level wrappers.
# ------------------------------------------------------------------------------
class MjModel(wrappers.MjModelWrapper):
"""Wrapper class for a MuJoCo 'mjModel' instance.
MjModel encapsulates features of the model that are expected to remain
constant. It also contains simulation and visualization options which may be
changed occasionally, although this is done explicitly by the user.
"""
def __init__(self, model_ptr):
"""Creates a new MjModel instance from a ctypes pointer.
Args:
model_ptr: A `ctypes.POINTER` to an `mjbindings.types.MJMODEL` instance.
"""
super().__init__(ptr=model_ptr)
def __getstate__(self):
# All of MjModel's state is assumed to reside within the MuJoCo C struct.
# However there is no mechanism to prevent users from adding arbitrary
# Python attributes to an MjModel instance - these would not be serialized.
return self.to_bytes()
def __setstate__(self, byte_string):
model_ptr = _get_model_ptr_from_binary(byte_string=byte_string)
self.__init__(model_ptr)
def __copy__(self):
new_model_ptr = mjlib.mj_copyModel(None, self.ptr)
return self.__class__(new_model_ptr)
@classmethod
def from_xml_string(cls, xml_string, assets=None):
"""Creates an `MjModel` instance from a model description XML string.
Args:
xml_string: String containing an MJCF or URDF model description.
assets: Optional dict containing external assets referenced by the model
(such as additional XML files, textures, meshes etc.), in the form of
`{filename: contents_string}` pairs. The keys should correspond to the
filenames specified in the model XML.
Returns:
An `MjModel` instance.
"""
model_ptr = _get_model_ptr_from_xml(xml_string=xml_string, assets=assets)
return cls(model_ptr)
@classmethod
def from_byte_string(cls, byte_string):
"""Creates an MjModel instance from a model binary as a string of bytes."""
model_ptr = _get_model_ptr_from_binary(byte_string=byte_string)
return cls(model_ptr)
@classmethod
def from_xml_path(cls, xml_path):
"""Creates an MjModel instance from a path to a model XML file."""
model_ptr = _get_model_ptr_from_xml(xml_path=xml_path)
return cls(model_ptr)
@classmethod
def from_binary_path(cls, binary_path):
"""Creates an MjModel instance from a path to a compiled model binary."""
model_ptr = _get_model_ptr_from_binary(binary_path=binary_path)
return cls(model_ptr)
def save_binary(self, binary_path):
"""Saves the MjModel instance to a binary file."""
mjlib.mj_saveModel(self.ptr, util.to_binary_string(binary_path), None, 0)
def to_bytes(self):
"""Serialize the model to a string of bytes."""
bufsize = mjlib.mj_sizeModel(self.ptr)
buf = ctypes.create_string_buffer(bufsize)
mjlib.mj_saveModel(self.ptr, None, buf, bufsize)
return buf.raw
def copy(self):
"""Returns a copy of this MjModel instance."""
return self.__copy__()
def free(self):
"""Frees the native resources held by this MjModel.
This is an advanced feature for use when manual memory management is
necessary. This MjModel object MUST NOT be used after this function has
been called.
"""
_finalize(self._ptr)
del self._ptr
def name2id(self, name, object_type):
"""Returns the integer ID of a specified MuJoCo object.
Args:
name: String specifying the name of the object to query.
object_type: The type of the object. Can be either a lowercase string
(e.g. 'body', 'geom') or an `mjtObj` enum value.
Returns:
An integer object ID.
Raises:
Error: If `object_type` is not a valid MuJoCo object type, or if no object
with the corresponding name and type was found.
"""
if not isinstance(object_type, int):
object_type = _str2type(object_type)
obj_id = mjlib.mj_name2id(
self.ptr, object_type, util.to_binary_string(name))
if obj_id == -1:
raise Error("Object of type {!r} with name {!r} does not exist.".format(
_type2str(object_type), name))
return obj_id
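  # Example (illustrative, assumes a model containing a body named 'torso'):
  #
  #   torso_id = model.name2id('torso', 'body')
  #   assert model.id2name(torso_id, 'body') == 'torso'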
def id2name(self, object_id, object_type):
"""Returns the name associated with a MuJoCo object ID, if there is one.
Args:
object_id: Integer ID.
object_type: The type of the object. Can be either a lowercase string
(e.g. 'body', 'geom') or an `mjtObj` enum value.
Returns:
A string containing the object name, or an empty string if the object ID
either doesn't exist or has no name.
Raises:
Error: If `object_type` is not a valid MuJoCo object type.
"""
if not isinstance(object_type, int):
object_type = _str2type(object_type)
name_ptr = mjlib.mj_id2name(self.ptr, object_type, object_id)
if not name_ptr:
return ""
return util.to_native_string(ctypes.string_at(name_ptr))
@contextlib.contextmanager
def disable(self, *flags):
"""Context manager for temporarily disabling MuJoCo flags.
Args:
*flags: Positional arguments specifying flags to disable. Can be either
lowercase strings (e.g. 'gravity', 'contact') or `mjtDisableBit` enum
values.
Yields:
None
Raises:
ValueError: If any item in `flags` is neither a valid name nor a value
from `enums.mjtDisableBit`.
"""
old_bitmask = self.opt.disableflags
new_bitmask = old_bitmask
for flag in flags:
if isinstance(flag, str):
try:
field_name = "mjDSBL_" + flag.upper()
bitmask = getattr(enums.mjtDisableBit, field_name)
except AttributeError:
valid_names = [field_name.split("_")[1].lower()
for field_name in enums.mjtDisableBit._fields[:-1]]
raise ValueError("'{}' is not a valid flag name. Valid names: {}"
.format(flag, ", ".join(valid_names)))
else:
if flag not in enums.mjtDisableBit[:-1]:
raise ValueError("'{}' is not a value in `enums.mjtDisableBit`. "
"Valid values: {}"
.format(flag, tuple(enums.mjtDisableBit[:-1])))
bitmask = flag
new_bitmask |= bitmask
self.opt.disableflags = new_bitmask
try:
yield
finally:
self.opt.disableflags = old_bitmask
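  # Example (illustrative): stepping the physics with gravity and contacts
  # temporarily disabled. `data` is assumed to be an MjData for this model.
  #
  #   with model.disable('gravity', 'contact'):
  #     mjlib.mj_step(model.ptr, data.ptr)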
@property
def name(self):
"""Returns the name of the model."""
# The model name is the first null-terminated string in the `names` buffer.
return util.to_native_string(
ctypes.string_at(ctypes.addressof(self.names.contents)))
class MjData(wrappers.MjDataWrapper):
"""Wrapper class for a MuJoCo 'mjData' instance.
MjData contains all of the dynamic variables and intermediate results produced
by the simulation. These are expected to change on each simulation timestep.
"""
def __init__(self, model):
"""Construct a new MjData instance.
Args:
model: An MjModel instance.
"""
self._model = model
# Allocate resources for mjData.
data_ptr = mjlib.mj_makeData(model.ptr)
# Free resources when the ctypes pointer is garbage collected.
_create_finalizer(data_ptr, mjlib.mj_deleteData)
super().__init__(data_ptr, model)
def __getstate__(self):
# Note: we can replace this once a `saveData` MJAPI function exists.
# To reconstruct an MjData instance we need three things:
# 1. Its parent MjModel instance
# 2. A subset of its fixed-size fields whose values aren't determined by
# the model
# 3. The contents of its internal buffer (all of its pointer fields point
# into this)
struct_fields = {}
for name in ["solver", "timer", "warning"]:
struct_fields[name] = getattr(self, name).copy()
scalar_field_names = ["ncon", "time", "energy"]
scalar_fields = {name: getattr(self, name) for name in scalar_field_names}
static_fields = {"struct_fields": struct_fields,
"scalar_fields": scalar_fields}
buffer_contents = ctypes.string_at(self.buffer_, self.nbuffer)
return (self._model, static_fields, buffer_contents)
def __setstate__(self, state_tuple):
# Replace this once a `loadData` MJAPI function exists.
self._model, static_fields, buffer_contents = state_tuple
self.__init__(self.model)
for name, contents in static_fields["struct_fields"].items():
getattr(self, name)[:] = contents
for name, value in static_fields["scalar_fields"].items():
# Array and scalar values must be handled separately.
try:
getattr(self, name)[:] = value
except TypeError:
setattr(self, name, value)
buf_ptr = (ctypes.c_char * self.nbuffer).from_address(self.buffer_)
buf_ptr[:] = buffer_contents
def __copy__(self):
# This makes a shallow copy that shares the same parent MjModel instance.
new_obj = self.__class__(self.model)
mjlib.mj_copyData(new_obj.ptr, self.model.ptr, self.ptr)
return new_obj
def copy(self):
"""Returns a copy of this MjData instance with the same parent MjModel."""
return self.__copy__()
def free(self):
"""Frees the native resources held by this MjData.
This is an advanced feature for use when manual memory management is
necessary. This MjData object MUST NOT be used after this function has
been called.
"""
_finalize(self._ptr)
del self._ptr
def object_velocity(self, object_id, object_type, local_frame=False):
"""Returns the 6D velocity (linear, angular) of a MuJoCo object.
Args:
object_id: Object identifier. Can be either integer ID or String name.
object_type: The type of the object. Can be either a lowercase string
(e.g. 'body', 'geom') or an `mjtObj` enum value.
      local_frame: Boolean specifying whether the velocity is given in the
        global (worldbody) or local (object) frame.
Returns:
2x3 array with stacked (linear_velocity, angular_velocity)
Raises:
Error: If `object_type` is not a valid MuJoCo object type, or if no object
with the corresponding name and type was found.
"""
if not isinstance(object_type, int):
object_type = _str2type(object_type)
velocity = np.empty(6, dtype=np.float64)
if not isinstance(object_id, int):
object_id = self.model.name2id(object_id, object_type)
mjlib.mj_objectVelocity(self.model.ptr, self.ptr,
object_type, object_id, velocity, local_frame)
# MuJoCo returns velocities in (angular, linear) order, which we flip here.
return velocity.reshape(2, 3)[::-1]
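  # Example (illustrative, assumes the model contains a body named 'ball'):
  #
  #   lin_vel, ang_vel = data.object_velocity('ball', 'body', local_frame=True)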
def contact_force(self, contact_id):
"""Returns the wrench of a contact as a 2 x 3 array of (forces, torques).
Args:
contact_id: Integer, the index of the contact within the contact buffer
(`self.contact`).
Returns:
2x3 array with stacked (force, torque). Note that the order of dimensions
is (normal, tangent, tangent), in the contact's frame.
Raises:
      ValueError: If `contact_id` is negative or greater than ncon-1.
"""
if not 0 <= contact_id < self.ncon:
raise ValueError(_CONTACT_ID_OUT_OF_RANGE
.format(max_valid=self.ncon-1, actual=contact_id))
wrench = np.empty(6, dtype=np.float64)
mjlib.mj_contactForce(self.model.ptr, self.ptr, contact_id, wrench)
return wrench.reshape(2, 3)
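  # Example (illustrative): summing contact normal forces over all active
  # contacts. The normal component is the first entry of the force row.
  #
  #   total_normal = sum(data.contact_force(i)[0, 0] for i in range(data.ncon))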
@property
def model(self):
"""The parent MjModel for this MjData instance."""
return self._model
@util.CachedProperty
def _contact_buffer(self):
"""Cached structured array containing the full contact buffer."""
contact_array = util.buf_to_npy(
super().contact, shape=(self._model.nconmax,))
return contact_array
@property
def contact(self):
"""Variable-length recarray containing all current contacts."""
return self._contact_buffer[:self.ncon]
# Docstrings for these subclasses are inherited from their Wrapper parent class.
class MjvCamera(wrappers.MjvCameraWrapper):
def __init__(self):
ptr = ctypes.pointer(types.MJVCAMERA())
mjlib.mjv_defaultCamera(ptr)
super().__init__(ptr)
class MjvOption(wrappers.MjvOptionWrapper):
def __init__(self):
ptr = ctypes.pointer(types.MJVOPTION())
mjlib.mjv_defaultOption(ptr)
# Do not visualize rangefinder lines by default:
ptr.contents.flags[enums.mjtVisFlag.mjVIS_RANGEFINDER] = False
super().__init__(ptr)
class UnmanagedMjrContext(wrappers.MjrContextWrapper):
"""A wrapper for MjrContext that does not manage the native object's lifetime.
  This wrapper is provided for API backward-compatibility reasons, since the
  creation and destruction of an MjrContext require an OpenGL context to be
  provided.
"""
def __init__(self):
ptr = ctypes.pointer(types.MJRCONTEXT())
mjlib.mjr_defaultContext(ptr)
super().__init__(ptr)
class MjrContext(wrappers.MjrContextWrapper): # pylint: disable=missing-docstring
def __init__(self,
model,
gl_context,
font_scale=enums.mjtFontScale.mjFONTSCALE_150):
"""Initializes this MjrContext instance.
Args:
model: An `MjModel` instance.
gl_context: A `render.ContextBase` instance.
font_scale: Integer controlling the font size for text. Must be a value
in `mjbindings.enums.mjtFontScale`.
Raises:
ValueError: If `font_scale` is invalid.
"""
if font_scale not in enums.mjtFontScale:
raise ValueError(_INVALID_FONT_SCALE.format(font_scale))
ptr = ctypes.pointer(types.MJRCONTEXT())
mjlib.mjr_defaultContext(ptr)
with gl_context.make_current() as ctx:
ctx.call(mjlib.mjr_makeContext, model.ptr, ptr, font_scale)
ctx.call(mjlib.mjr_setBuffer, enums.mjtFramebuffer.mjFB_OFFSCREEN, ptr)
gl_context.increment_refcount()
# Free resources when the ctypes pointer is garbage collected.
def finalize_mjr_context(ptr):
if not gl_context.terminated:
with gl_context.make_current() as ctx:
ctx.call(mjlib.mjr_freeContext, ptr)
gl_context.decrement_refcount()
_create_finalizer(ptr, finalize_mjr_context)
super().__init__(ptr)
def free(self):
"""Frees the native resources held by this MjrContext.
This is an advanced feature for use when manual memory management is
necessary. This MjrContext object MUST NOT be used after this function has
been called.
"""
_finalize(self._ptr)
del self._ptr
# A mapping from human-readable short names to mjtRndFlag enum values, i.e.
# {'shadow': mjtRndFlag.mjRND_SHADOW, 'fog': mjtRndFlag.mjRND_FOG, ...}
_NAME_TO_RENDER_FLAG_ENUM_VALUE = {
name[len("mjRND_"):].lower(): getattr(enums.mjtRndFlag, name)
for name in enums.mjtRndFlag._fields[:-1] # Exclude mjRND_NUMRNDFLAG entry.
}
def _estimate_max_renderable_geoms(model):
"""Estimates the maximum number of renderable geoms for a given model."""
# Only one type of object frame can be rendered at once.
max_nframes = max(
[model.nbody, model.ngeom, model.nsite, model.ncam, model.nlight])
# This is probably an underestimate, but it is unlikely that all possible
# rendering options will be enabled simultaneously, or that all renderable
# geoms will be present within the viewing frustum at the same time.
return (
3 * max_nframes + # 1 geom per axis for each frame.
4 * model.ngeom + # geom itself + contacts + 2 * split contact forces.
3 * model.nbody + # COM + inertia box + perturbation force.
model.nsite +
model.ntendon +
model.njnt +
model.nu +
model.nskin +
model.ncam +
model.nlight)
class MjvScene(wrappers.MjvSceneWrapper): # pylint: disable=missing-docstring
def __init__(self, model=None, max_geom=None):
"""Initializes a new `MjvScene` instance.
Args:
model: (optional) An `MjModel` instance.
max_geom: (optional) An integer specifying the maximum number of geoms
that can be represented in the scene. If None, this will be chosen
automatically based on `model`.
"""
model_ptr = model.ptr if model is not None else None
scene_ptr = ctypes.pointer(types.MJVSCENE())
if max_geom is None:
if model is None:
max_renderable_geoms = 0
else:
max_renderable_geoms = _estimate_max_renderable_geoms(model)
max_geom = max(1000, max_renderable_geoms)
# Allocate and initialize resources for the abstract scene.
mjlib.mjv_makeScene(model_ptr, scene_ptr, max_geom)
# Free resources when the ctypes pointer is garbage collected.
_create_finalizer(scene_ptr, mjlib.mjv_freeScene)
super().__init__(scene_ptr)
@contextlib.contextmanager
def override_flags(self, overrides):
"""Context manager for temporarily overriding rendering flags.
Args:
overrides: A mapping specifying rendering flags to override. The keys can
be either lowercase strings or `mjtRndFlag` enum values, and the values
are the overridden flag values, e.g. `{'wireframe': True}` or
`{enums.mjtRndFlag.mjRND_WIREFRAME: True}`. See `enums.mjtRndFlag` for
the set of valid flags.
Yields:
None
"""
if not overrides:
yield
else:
original_flags = self.flags.copy()
for key, value in overrides.items():
index = _NAME_TO_RENDER_FLAG_ENUM_VALUE.get(key, key)
self.flags[index] = value
try:
yield
finally:
np.copyto(self.flags, original_flags)
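  # Example (illustrative): rendering a scene with wireframe enabled and
  # shadows disabled, given an existing `scene` instance.
  #
  #   with scene.override_flags({'wireframe': True, 'shadow': False}):
  #     pass  # render here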
def free(self):
"""Frees the native resources held by this MjvScene.
This is an advanced feature for use when manual memory management is
necessary. This MjvScene object MUST NOT be used after this function has
been called.
"""
_finalize(self._ptr)
del self._ptr
@util.CachedProperty
def _geoms_buffer(self):
"""Cached recarray containing the full geom buffer."""
return util.buf_to_npy(super().geoms, shape=(self.maxgeom,))
@property
def geoms(self):
"""Variable-length recarray containing all geoms currently in the buffer."""
return self._geoms_buffer[:self.ngeom]
class MjvPerturb(wrappers.MjvPerturbWrapper):
def __init__(self):
ptr = ctypes.pointer(types.MJVPERTURB())
mjlib.mjv_defaultPerturb(ptr)
super().__init__(ptr)
class MjvFigure(wrappers.MjvFigureWrapper):
def __init__(self):
ptr = ctypes.pointer(types.MJVFIGURE())
mjlib.mjv_defaultFigure(ptr)
super().__init__(ptr)
| apache-2.0 |
jeffery9/mixprint_addons | portal/wizard/portal_wizard.py | 7 | 9586 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import email_re
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# welcome email sent to portal users
# (note that calling '_' has no effect except exporting those strings for translation)
WELCOME_EMAIL_SUBJECT = _("Your OpenERP account at %(company)s")
WELCOME_EMAIL_BODY = _("""Dear %(name)s,
You have been given access to %(portal)s.
Your login account data is:
Database: %(db)s
Username: %(login)s
In order to complete the sign-in process, click on the following url:
%(url)s
%(welcome_message)s
--
OpenERP - Open Source Business Applications
http://www.openerp.com
""")
def extract_email(email):
""" extract the email address from a user-friendly email address """
m = email_re.search(email or "")
return m and m.group(0) or ""
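# Example (illustrative, not part of the original module):
#
#   extract_email('John Doe <[email protected]>')  # -> '[email protected]'
#   extract_email('no address here')             # -> ''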
class wizard(osv.osv_memory):
"""
A wizard to manage the creation/removal of portal users.
"""
_name = 'portal.wizard'
_description = 'Portal Access Management'
_columns = {
'portal_id': fields.many2one('res.groups', domain=[('is_portal', '=', True)], required=True,
string='Portal', help="The portal that users can be added in or removed from."),
'user_ids': fields.one2many('portal.wizard.user', 'wizard_id', string='Users'),
'welcome_message': fields.text(string='Invitation Message',
help="This text is included in the email sent to new users of the portal."),
}
def _default_portal(self, cr, uid, context):
portal_ids = self.pool.get('res.groups').search(cr, uid, [('is_portal', '=', True)])
return portal_ids and portal_ids[0] or False
_defaults = {
'portal_id': _default_portal,
}
def onchange_portal_id(self, cr, uid, ids, portal_id, context=None):
# for each partner, determine corresponding portal.wizard.user records
res_partner = self.pool.get('res.partner')
partner_ids = context and context.get('active_ids') or []
contact_ids = set()
user_changes = []
for partner in res_partner.browse(cr, SUPERUSER_ID, partner_ids, context):
for contact in (partner.child_ids or [partner]):
# make sure that each contact appears at most once in the list
if contact.id not in contact_ids:
contact_ids.add(contact.id)
in_portal = False
if contact.user_ids:
in_portal = portal_id in [g.id for g in contact.user_ids[0].groups_id]
user_changes.append((0, 0, {
'partner_id': contact.id,
'email': contact.email,
'in_portal': in_portal,
}))
return {'value': {'user_ids': user_changes}}
def action_apply(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context)
portal_user_ids = [user.id for user in wizard.user_ids]
self.pool.get('portal.wizard.user').action_apply(cr, uid, portal_user_ids, context)
return {'type': 'ir.actions.act_window_close'}
class wizard_user(osv.osv_memory):
"""
A model to configure users in the portal wizard.
"""
_name = 'portal.wizard.user'
_description = 'Portal User Config'
_columns = {
'wizard_id': fields.many2one('portal.wizard', string='Wizard', required=True),
'partner_id': fields.many2one('res.partner', string='Contact', required=True, readonly=True),
'email': fields.char(size=240, string='Email'),
'in_portal': fields.boolean('In Portal'),
}
def create(self, cr, uid, values, context=None):
""" overridden to update the partner's email (if necessary) """
id = super(wizard_user, self).create(cr, uid, values, context)
wuser = self.browse(cr, uid, id, context)
if wuser.partner_id.email != wuser.email:
wuser.partner_id.write({'email': wuser.email})
return id
def action_apply(self, cr, uid, ids, context=None):
for wizard_user in self.browse(cr, SUPERUSER_ID, ids, context):
portal = wizard_user.wizard_id.portal_id
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
if wizard_user.in_portal:
# create a user if necessary, and make sure it is in the portal group
if not user:
user = self._create_user(cr, SUPERUSER_ID, wizard_user, context)
if (not user.active) or (portal not in user.groups_id):
user.write({'active': True, 'groups_id': [(4, portal.id)]})
# prepare for the signup process
user.partner_id.signup_prepare()
wizard_user = self.browse(cr, SUPERUSER_ID, wizard_user.id, context)
self._send_email(cr, uid, wizard_user, context)
else:
# remove the user (if it exists) from the portal group
if user and (portal in user.groups_id):
# if user belongs to portal only, deactivate it
if len(user.groups_id) <= 1:
user.write({'groups_id': [(3, portal.id)], 'active': False})
else:
user.write({'groups_id': [(3, portal.id)]})
def _retrieve_user(self, cr, uid, wizard_user, context=None):
""" retrieve the (possibly inactive) user corresponding to wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
if wizard_user.partner_id.user_ids:
return wizard_user.partner_id.user_ids[0]
# the user may be inactive, search for it
res_users = self.pool.get('res.users')
domain = [('partner_id', '=', wizard_user.partner_id.id), ('active', '=', False)]
user_ids = res_users.search(cr, uid, domain)
return user_ids and res_users.browse(cr, uid, user_ids[0], context) or False
def _create_user(self, cr, uid, wizard_user, context=None):
""" create a new user for wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
res_users = self.pool.get('res.users')
create_context = dict(context or {}, noshortcut=True) # to prevent shortcut creation
values = {
'login': extract_email(wizard_user.email),
'partner_id': wizard_user.partner_id.id,
'groups_id': [(6, 0, [])],
'share': True,
}
user_id = res_users.create(cr, uid, values, context=create_context)
return res_users.browse(cr, uid, user_id, context)
def _send_email(self, cr, uid, wizard_user, context=None):
""" send notification email to a new portal user
@param wizard_user: browse record of model portal.wizard.user
@return: the id of the created mail.mail record
"""
this_context = context
this_user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context)
if not this_user.email:
raise osv.except_osv(_('Email required'),
_('You must have an email address in your User Preferences to send emails.'))
# determine subject and body in the portal user's language
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
context = dict(this_context or {}, lang=user.lang)
data = {
'company': this_user.company_id.name,
'portal': wizard_user.wizard_id.portal_id.name,
'welcome_message': wizard_user.wizard_id.welcome_message or "",
'db': cr.dbname,
'name': user.name,
'login': user.login,
'url': user.signup_url,
}
mail_mail = self.pool.get('mail.mail')
mail_values = {
'email_from': this_user.email,
'email_to': user.email,
'subject': _(WELCOME_EMAIL_SUBJECT) % data,
'body_html': '<pre>%s</pre>' % (_(WELCOME_EMAIL_BODY) % data),
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create(cr, uid, mail_values, context=this_context)
return mail_mail.send(cr, uid, [mail_id], context=this_context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jpoimboe/linux | scripts/gdb/linux/symbols.py | 588 | 6302 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context so that we would
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
    The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
    are scanned recursively, starting in the same directory. Optionally, the module
    search path can be extended by a space-separated list of paths passed to the
    lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
module_pattern = ".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['module_core']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
[self.load_module_symbols(module) for module in module_list]
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = arg.split()
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if self.breakpoint is not None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
| gpl-2.0 |
avocado-framework/avocado-vt | virttest/utils_windows/drive.py | 2 | 3534 | """
Windows drive utilities
"""
from . import wmic
from virttest import utils_misc
def _logical_disks(session, cond=None, props=None):
cmd = wmic.make_query("LogicalDisk", cond, props,
get_swch=wmic.FMT_TYPE_LIST)
out = utils_misc.wait_for(lambda: wmic.parse_list(session.cmd(cmd,
timeout=120)), 240)
return out if out else []
def get_hard_drive_letter(session, label):
"""
Get hard drive's letter by the given label.
:param session: Session object.
:param label: Label pattern string.
:return: Hard drive's letter if found, otherwise `None`.
"""
cond = "VolumeName like '%s'" % label
try:
return _logical_disks(session, cond=cond, props=["DeviceID"])[0]
except IndexError:
return None
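# Example (illustrative sketch; `session` is an aexpect session to a Windows
# guest and the volume label pattern is hypothetical):
#
#   letter = get_hard_drive_letter(session, "DATA%")
#   if letter:
#       session.cmd("dir %s\\" % letter)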
def get_floppy_drives_letter(session):
"""
    Get the letters of all floppy drives.
:param session: Session object.
:return: Floppy drives' letter.
"""
cond = "MediaType!=0 AND MediaType!=11 AND MediaType!=12"
return _logical_disks(session, cond=cond, props=["DeviceID"])
def rescan_disks(session):
"""
Rescan disks in windows guest.
:param session: Session object.
"""
script_path = r"%TEMP%\rescan.dp"
rescan_cmd = "echo rescan > {0} && diskpart /s {0}"
session.cmd(rescan_cmd.format(script_path))
def extend_volume(session, vol_id, size=None):
"""
Extend a volume in windows guest.
:param session: Session object.
:param vol_id: Drive letter or Volume number.
    :param size: By default, the volume is extended to the maximum
                 available size; if size is specified, it is extended
                 to size. The default unit of size is M.
"""
script_path = r"%TEMP%\extend_{0}.dp".format(vol_id)
extend_cmd = 'echo select volume %s > {0} && ' % vol_id
if not size:
extend_cmd += 'echo extend >> {0} && diskpart /s {0}'
else:
extend_cmd += 'echo extend desired=%s >> {0} ' % size
extend_cmd += '&& diskpart /s {0}'
session.cmd(extend_cmd.format(script_path))
def shrink_volume(session, vol_id, size):
"""
Shrink a volume in windows guest.
:param session: Session object.
:param vol_id: Drive letter or Volume number.
:param size: Desired decrease size. The default unit of size is M.
"""
script_path = r"%TEMP%\shrink_{0}.dp".format(vol_id)
shrink_cmd = 'echo select volume %s > {0} && ' % vol_id
shrink_cmd += 'echo shrink desired=%s >> {0} ' % size
shrink_cmd += '&& diskpart /s {0}'
session.cmd(shrink_cmd.format(script_path))
def get_disk_props_by_serial_number(session, serial_number, props):
"""
Get disk drive value of properties by serial number in windows guest.
:param session: Windows VM session.
:type session: aexpect.ShellSession
:param serial_number: The serial number of disk drive.
:type serial_number: str
:param props: The list of properties to be get.
e.g: ['DeviceID', 'Index', 'Name']
:type props: list
:return: The mapping between properties and values.
:rtype: dict
"""
cond = "SerialNumber like '%s'" % serial_number
cmd = wmic.make_query('diskdrive', cond, props=props, get_swch=wmic.FMT_TYPE_LIST)
out = wmic.parse_list(session.cmd(cmd, timeout=120))
if out:
mapping = out[-1]
if isinstance(mapping, str):
return {props[0]: mapping}
return mapping
return {}
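# Example (illustrative; the serial number is hypothetical):
#
#   props = get_disk_props_by_serial_number(session, "QM00001",
#                                           ["DeviceID", "Index"])
#   disk_index = props.get("Index")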
| gpl-2.0 |
crazy-cat/incubator-mxnet | example/fcn-xs/data.py | 52 | 6068 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
""" file iterator for pasval voc 2012"""
import mxnet as mx
import numpy as np
import sys, os
from mxnet.io import DataIter
from PIL import Image
class FileIter(DataIter):
"""FileIter object in fcn-xs example. Taking a file list file to get dataiter.
in this example, we use the whole image training for fcn-xs, that is to say
we do not need resize/crop the image to the same size, so the batch_size is
set to 1 here
Parameters
----------
root_dir : string
the root dir of image/label lie in
flist_name : string
        the list file of image and label; every line has the form:
        index \t image_data_path \t image_label_path
cut_off_size : int
        if the larger dimension of an image exceeds cut_off_size, the image
        is randomly cropped: to a cut_off_size square when both dimensions
        exceed it, otherwise to a square whose side is the smaller dimension
data_name : string
the data name used in symbol data(default data name)
label_name : string
the label name used in symbol softmax_label(default label name)
"""
def __init__(self, root_dir, flist_name,
rgb_mean = (117, 117, 117),
cut_off_size = None,
data_name = "data",
label_name = "softmax_label"):
super(FileIter, self).__init__()
self.root_dir = root_dir
self.flist_name = os.path.join(self.root_dir, flist_name)
self.mean = np.array(rgb_mean) # (R, G, B)
self.cut_off_size = cut_off_size
self.data_name = data_name
self.label_name = label_name
self.num_data = len(open(self.flist_name, 'r').readlines())
self.f = open(self.flist_name, 'r')
self.data, self.label = self._read()
self.cursor = -1
def _read(self):
"""get two list, each list contains two elements: name and nd.array value"""
_, data_img_name, label_img_name = self.f.readline().strip('\n').split("\t")
data = {}
label = {}
data[self.data_name], label[self.label_name] = self._read_img(data_img_name, label_img_name)
return list(data.items()), list(label.items())
def _read_img(self, img_name, label_name):
img = Image.open(os.path.join(self.root_dir, img_name))
label = Image.open(os.path.join(self.root_dir, label_name))
assert img.size == label.size
img = np.array(img, dtype=np.float32) # (h, w, c)
label = np.array(label) # (h, w)
if self.cut_off_size is not None:
max_hw = max(img.shape[0], img.shape[1])
min_hw = min(img.shape[0], img.shape[1])
if min_hw > self.cut_off_size:
rand_start_max = int(np.random.uniform(0, max_hw - self.cut_off_size - 1))
rand_start_min = int(np.random.uniform(0, min_hw - self.cut_off_size - 1))
if img.shape[0] == max_hw :
img = img[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size]
label = label[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size]
else :
img = img[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size]
label = label[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size]
elif max_hw > self.cut_off_size:
rand_start = int(np.random.uniform(0, max_hw - min_hw - 1))
if img.shape[0] == max_hw :
img = img[rand_start : rand_start + min_hw, :]
label = label[rand_start : rand_start + min_hw, :]
else :
img = img[:, rand_start : rand_start + min_hw]
label = label[:, rand_start : rand_start + min_hw]
reshaped_mean = self.mean.reshape(1, 1, 3)
img = img - reshaped_mean
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # (c, h, w)
img = np.expand_dims(img, axis=0) # (1, c, h, w)
label = np.array(label) # (h, w)
label = np.expand_dims(label, axis=0) # (1, h, w)
return (img, label)
@property
def provide_data(self):
"""The name and shape of data provided by this iterator"""
return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]
@property
def provide_label(self):
"""The name and shape of label provided by this iterator"""
return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label]
def get_batch_size(self):
return 1
def reset(self):
self.cursor = -1
self.f.close()
self.f = open(self.flist_name, 'r')
def iter_next(self):
self.cursor += 1
        return self.cursor < self.num_data - 1
def next(self):
"""return one dict which contains "data" and "label" """
if self.iter_next():
self.data, self.label = self._read()
return {self.data_name : self.data[0][1],
self.label_name : self.label[0][1]}
else:
raise StopIteration
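# Example (illustrative; paths refer to a hypothetical PASCAL VOC layout):
#
#   train_iter = FileIter(root_dir="./VOC2012", flist_name="train.lst")
#   batch = train_iter.next()
#   # batch["data"] has shape (1, c, h, w); batch["softmax_label"] (1, h, w)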
| apache-2.0 |
Shade5/coala | setup.py | 8 | 4774 | #!/usr/bin/env python3
import datetime
import locale
import platform
import sys
from os import getenv
from subprocess import call
import setuptools.command.build_py
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from coalib import VERSION, assert_supported_version, get_version
from coalib.misc.BuildManPage import BuildManPage
try:
locale.getlocale()
except (ValueError, UnicodeError):
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
assert_supported_version()
class BuildPyCommand(setuptools.command.build_py.build_py):
def run(self):
if platform.system() != 'Windows':
self.run_command('build_manpage')
setuptools.command.build_py.build_py.run(self)
class PyTestCommand(TestCommand):
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main([])
sys.exit(errno)
class BuildDocsCommand(setuptools.command.build_py.build_py):
apidoc_command = (
'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'
)
doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')
def run(self):
errOne = call(self.apidoc_command)
errTwo = call(self.doc_command)
sys.exit(errOne or errTwo)
# Generate API documentation only if we are running on readthedocs.io
on_rtd = getenv('READTHEDOCS', None) is not None
if on_rtd:
call(BuildDocsCommand.apidoc_command)
if 'dev' in VERSION:
current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',
'-b {}'.format(current_version)])
VERSION = get_version()
with open('requirements.txt') as requirements:
required = requirements.read().splitlines()
with open('test-requirements.txt') as requirements:
test_required = requirements.read().splitlines()
with open('README.rst') as readme:
long_description = readme.read()
if __name__ == '__main__':
if platform.system() != 'Windows':
data_files = [('.', ['coala.1'])]
else:
data_files = [('.', [])]
setup(name='coala',
version=VERSION,
description='Linting and Fixing Code for All Languages',
author='The coala developers',
author_email='[email protected]',
maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger'
if not on_rtd else 'L.S., F.N., M.K.',
maintainer_email=('[email protected], '
'[email protected], '
'[email protected]'),
url='http://coala.io/',
platforms='any',
packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
install_requires=required,
tests_require=test_required,
package_data={'coalib': ['default_coafile', 'VERSION',
'bearlib/languages/documentation/*.coalang']
},
license='AGPL-3.0',
data_files=data_files,
long_description=long_description,
entry_points={
'console_scripts': [
'coala = coalib.coala:main',
'coala-ci = coalib.coala_ci:main',
'coala-json = coalib.coala_json:main',
'coala-format = coalib.coala_format:main',
'coala-delete-orig = coalib.coala_delete_orig:main']},
# from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications :: Gnome',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License '
'v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Text Processing :: Linguistic'],
cmdclass={'build_manpage': BuildManPage,
'build_py': BuildPyCommand,
'docs': BuildDocsCommand,
'test': PyTestCommand})
| agpl-3.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Tools/framer/framer/function.py | 48 | 4246 | """Functions."""
from framer import template
from framer.util import cstring, unindent
METH_O = "METH_O"
METH_NOARGS = "METH_NOARGS"
METH_VARARGS = "METH_VARARGS"
def parsefmt(fmt):
for c in fmt:
if c == '|':
continue
yield c
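# Example (illustrative): the '|' marker separating required from optional
# arguments is skipped; only the type codes are yielded.
#
#   list(parsefmt("O|i"))  # -> ['O', 'i']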
class Argument:
def __init__(self, name):
self.name = name
self.ctype = "PyObject *"
self.default = None
def __str__(self):
return "%s%s" % (self.ctype, self.name)
def setfmt(self, code):
self.ctype = self._codes[code]
if self.ctype[-1] != "*":
self.ctype += " "
_codes = {"O": "PyObject *",
"i": "int",
}
def decl(self):
if self.default is None:
return str(self) + ";"
else:
return "%s = %s;" % (self, self.default)
class _ArgumentList(object):
# these instance variables should be initialized by subclasses
ml_meth = None
fmt = None
def __init__(self, args):
self.args = map(Argument, args)
def __len__(self):
return len(self.args)
def __getitem__(self, i):
return self.args[i]
def dump_decls(self, f):
pass
class NoArgs(_ArgumentList):
def __init__(self, args):
assert len(args) == 0
super(NoArgs, self).__init__(args)
self.ml_meth = METH_NOARGS
def c_args(self):
return "PyObject *self"
class OneArg(_ArgumentList):
def __init__(self, args):
assert len(args) == 1
super(OneArg, self).__init__(args)
self.ml_meth = METH_O
def c_args(self):
return "PyObject *self, %s" % self.args[0]
class VarArgs(_ArgumentList):
def __init__(self, args, fmt=None):
super(VarArgs, self).__init__(args)
self.ml_meth = METH_VARARGS
if fmt is not None:
self.fmt = fmt
i = 0
for code in parsefmt(fmt):
self.args[i].setfmt(code)
i += 1
def c_args(self):
return "PyObject *self, PyObject *args"
def targets(self):
return ", ".join(["&%s" % a.name for a in self.args])
def dump_decls(self, f):
for a in self.args:
print >> f, " %s" % a.decl()
def ArgumentList(func, method):
code = func.func_code
args = code.co_varnames[:code.co_argcount]
if method:
args = args[1:]
pyarg = getattr(func, "pyarg", None)
if pyarg is not None:
args = VarArgs(args, pyarg)
if func.func_defaults:
L = list(func.func_defaults)
ndefault = len(L)
i = len(args) - ndefault
            while L:
                args[i].default = L.pop(0)
                i += 1
return args
else:
if len(args) == 0:
return NoArgs(args)
elif len(args) == 1:
return OneArg(args)
else:
return VarArgs(args)
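# Example (illustrative, Python 2 like the rest of this module): a function
# carrying a `pyarg` format string maps to a VarArgs list with typed entries.
#
#   def meth(self, obj, count=1):
#       pass
#   meth.pyarg = "O|i"
#   args = ArgumentList(meth, method=True)
#   # args[0].ctype == 'PyObject *'; args[1].ctype == 'int ' with default 1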
class Function:
method = False
def __init__(self, func, parent):
self._func = func
self._parent = parent
self.analyze()
self.initvars()
def dump(self, f):
def p(templ, vars=None): # helper function to generate output
if vars is None:
vars = self.vars
print >> f, templ % vars
if self.__doc__:
p(template.docstring)
d = {"name" : self.vars["CName"],
"args" : self.args.c_args(),
}
p(template.funcdef_start, d)
self.args.dump_decls(f)
if self.args.ml_meth == METH_VARARGS:
p(template.varargs)
p(template.funcdef_end)
def analyze(self):
self.__doc__ = self._func.__doc__
self.args = ArgumentList(self._func, self.method)
def initvars(self):
v = self.vars = {}
v["PythonName"] = self._func.__name__
s = v["CName"] = "%s_%s" % (self._parent.name, self._func.__name__)
v["DocstringVar"] = s + "_doc"
v["MethType"] = self.args.ml_meth
if self.__doc__:
v["Docstring"] = cstring(unindent(self.__doc__))
if self.args.fmt is not None:
v["ArgParse"] = self.args.fmt
v["ArgTargets"] = self.args.targets()
class Method(Function):
method = True
| apache-2.0 |
SaschaMester/delicium | chrome/test/mini_installer/file_verifier.py | 125 | 1116 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import verifier
class FileVerifier(verifier.Verifier):
"""Verifies that the current files match the expectation dictionaries."""
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Overridden from verifier.Verifier.
This method will throw an AssertionError if file state doesn't match the
|expectation|.
Args:
expectation_name: Path to the file being verified. It is expanded using
Expand.
expectation: A dictionary with the following key and value:
'exists' a boolean indicating whether the file should exist.
variable_expander: A VariableExpander object.
"""
file_path = variable_expander.Expand(expectation_name)
file_exists = os.path.exists(file_path)
assert expectation['exists'] == file_exists, \
('File %s exists' % file_path) if file_exists else \
('File %s is missing' % file_path)
| bsd-3-clause |
lmyrefelt/CouchPotatoServer | libs/requests/__init__.py | 62 | 1863 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '1.2.3'
__build__ = 0x010203
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from requests.packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| gpl-3.0 |
adrienbrault/home-assistant | tests/components/plex/test_init.py | 4 | 7416 | """Tests for Plex setup."""
import copy
from datetime import timedelta
import ssl
from unittest.mock import patch
import plexapi
import requests
import homeassistant.components.plex.const as const
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL, STATE_IDLE
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .const import DEFAULT_DATA, DEFAULT_OPTIONS, PLEX_DIRECT_URL
from .helpers import trigger_plex_update, wait_for_debouncer
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_set_config_entry_unique_id(hass, entry, mock_plex_server):
"""Test updating missing unique_id from config entry."""
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert (
hass.config_entries.async_entries(const.DOMAIN)[0].unique_id
== mock_plex_server.machine_identifier
)
async def test_setup_config_entry_with_error(hass, entry):
"""Test setup component from config entry with errors."""
with patch(
"homeassistant.components.plex.PlexServer.connect",
side_effect=requests.exceptions.ConnectionError,
):
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id) is False
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_SETUP_RETRY
with patch(
"homeassistant.components.plex.PlexServer.connect",
side_effect=plexapi.exceptions.BadRequest,
):
next_update = dt_util.utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_SETUP_ERROR
async def test_setup_with_insecure_config_entry(hass, entry, setup_plex_server):
"""Test setup component with config."""
INSECURE_DATA = copy.deepcopy(DEFAULT_DATA)
INSECURE_DATA[const.PLEX_SERVER_CONFIG][CONF_VERIFY_SSL] = False
entry.data = INSECURE_DATA
await setup_plex_server(config_entry=entry)
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
async def test_unload_config_entry(hass, entry, mock_plex_server):
"""Test unloading a config entry."""
config_entries = hass.config_entries.async_entries(const.DOMAIN)
assert len(config_entries) == 1
assert entry is config_entries[0]
assert entry.state == ENTRY_STATE_LOADED
server_id = mock_plex_server.machine_identifier
loaded_server = hass.data[const.DOMAIN][const.SERVERS][server_id]
assert loaded_server == mock_plex_server
websocket = hass.data[const.DOMAIN][const.WEBSOCKETS][server_id]
await hass.config_entries.async_unload(entry.entry_id)
assert websocket.close.called
assert entry.state == ENTRY_STATE_NOT_LOADED
async def test_setup_with_photo_session(hass, entry, setup_plex_server):
"""Test setup component with config."""
await setup_plex_server(session_type="photo")
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
await hass.async_block_till_done()
media_player = hass.states.get(
"media_player.plex_plex_for_android_tv_shield_android_tv"
)
assert media_player.state == STATE_IDLE
await wait_for_debouncer(hass)
sensor = hass.states.get("sensor.plex_plex_server_1")
assert sensor.state == "0"
async def test_setup_when_certificate_changed(
hass,
requests_mock,
empty_payload,
plex_server_accounts,
plex_server_default,
plextv_account,
plextv_resources,
plextv_shared_users,
):
"""Test setup component when the Plex certificate has changed."""
await async_setup_component(hass, "persistent_notification", {})
class WrongCertHostnameException(requests.exceptions.SSLError):
"""Mock the exception showing a mismatched hostname."""
def __init__(self):
self.__context__ = ssl.SSLCertVerificationError(
f"hostname '{old_domain}' doesn't match"
)
old_domain = "1-2-3-4.1111111111ffffff1111111111ffffff.plex.direct"
old_url = f"https://{old_domain}:32400"
OLD_HOSTNAME_DATA = copy.deepcopy(DEFAULT_DATA)
OLD_HOSTNAME_DATA[const.PLEX_SERVER_CONFIG][CONF_URL] = old_url
old_entry = MockConfigEntry(
domain=const.DOMAIN,
data=OLD_HOSTNAME_DATA,
options=DEFAULT_OPTIONS,
unique_id=DEFAULT_DATA["server_id"],
)
requests_mock.get("https://plex.tv/api/users/", text=plextv_shared_users)
requests_mock.get("https://plex.tv/api/invites/requested", text=empty_payload)
requests_mock.get("https://plex.tv/users/account", text=plextv_account)
requests_mock.get("https://plex.tv/api/resources", text=plextv_resources)
requests_mock.get(old_url, exc=WrongCertHostnameException)
# Test with account failure
requests_mock.get(f"{old_url}/accounts", status_code=401)
old_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(old_entry.entry_id) is False
await hass.async_block_till_done()
assert old_entry.state == ENTRY_STATE_SETUP_ERROR
await hass.config_entries.async_unload(old_entry.entry_id)
# Test with no servers found
requests_mock.get(f"{old_url}/accounts", text=plex_server_accounts)
requests_mock.get("https://plex.tv/api/resources", text=empty_payload)
assert await hass.config_entries.async_setup(old_entry.entry_id) is False
await hass.async_block_till_done()
assert old_entry.state == ENTRY_STATE_SETUP_ERROR
await hass.config_entries.async_unload(old_entry.entry_id)
# Test with success
new_url = PLEX_DIRECT_URL
requests_mock.get("https://plex.tv/api/resources", text=plextv_resources)
requests_mock.get(new_url, text=plex_server_default)
requests_mock.get(f"{new_url}/accounts", text=plex_server_accounts)
assert await hass.config_entries.async_setup(old_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert old_entry.state == ENTRY_STATE_LOADED
assert old_entry.data[const.PLEX_SERVER_CONFIG][CONF_URL] == new_url
async def test_tokenless_server(entry, setup_plex_server):
"""Test setup with a server with token auth disabled."""
TOKENLESS_DATA = copy.deepcopy(DEFAULT_DATA)
TOKENLESS_DATA[const.PLEX_SERVER_CONFIG].pop(CONF_TOKEN, None)
entry.data = TOKENLESS_DATA
await setup_plex_server(config_entry=entry)
assert entry.state == ENTRY_STATE_LOADED
async def test_bad_token_with_tokenless_server(
hass, entry, mock_websocket, setup_plex_server, requests_mock
):
"""Test setup with a bad token and a server with token auth disabled."""
requests_mock.get("https://plex.tv/users/account", status_code=401)
await setup_plex_server()
assert entry.state == ENTRY_STATE_LOADED
# Ensure updates that rely on account return nothing
trigger_plex_update(mock_websocket)
await hass.async_block_till_done()
| mit |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/email/header.py | 3 | 23088 | # Copyright (C) 2002-2007 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email import charset as _charset
Charset = _charset.Charset
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78
FWS = ' \t'
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attacks.
_embeded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (string, charset) pairs containing each of the decoded
parts of the header. Charset is None for non-encoded parts of the header,
otherwise a lower-case string containing the name of the character set
specified in the encoded string.
header may be a string that may or may not contain RFC2047 encoded words,
or it may be a Header object.
    An email.errors.HeaderParseError may be raised when a decoding error
    occurs (e.g. a base64 decoding exception).
"""
# If it is a Header object, we can just return the encoded chunks.
if hasattr(header, '_chunks'):
return [(_charset._encode(string, str(charset)), str(charset))
for string, charset in header._chunks]
# If no encoding, just return the header with no charset.
if not ecre.search(header):
return [(header, None)]
# First step is to parse all the encoded parts into triplets of the form
# (encoded_string, encoding, charset). For unencoded strings, the last
# two parts will be None.
words = []
for line in header.splitlines():
parts = ecre.split(line)
while parts:
unencoded = parts.pop(0).strip()
if unencoded:
words.append((unencoded, None, None))
if parts:
charset = parts.pop(0).lower()
encoding = parts.pop(0).lower()
encoded = parts.pop(0)
words.append((encoded, encoding, charset))
# The next step is to decode each encoded word by applying the reverse
# base64 or quopri transformation. decoded_words is now a list of the
# form (decoded_word, charset).
decoded_words = []
for encoded_string, encoding, charset in words:
if encoding is None:
# This is an unencoded word.
decoded_words.append((encoded_string, charset))
elif encoding == 'q':
word = email.quoprimime.header_decode(encoded_string)
decoded_words.append((word, charset))
elif encoding == 'b':
paderr = len(encoded_string) % 4 # Postel's law: add missing padding
if paderr:
encoded_string += '==='[:4 - paderr]
try:
word = email.base64mime.decode(encoded_string)
except binascii.Error:
raise HeaderParseError('Base64 decoding error')
else:
decoded_words.append((word, charset))
else:
raise AssertionError('Unexpected encoding: ' + encoding)
# Now convert all words to bytes and collapse consecutive runs of
# similarly encoded words.
collapsed = []
last_word = last_charset = None
for word, charset in decoded_words:
if isinstance(word, str):
word = bytes(word, 'raw-unicode-escape')
if last_word is None:
last_word = word
last_charset = charset
elif charset != last_charset:
collapsed.append((last_word, last_charset))
last_word = word
last_charset = charset
elif last_charset is None:
last_word += BSPACE + word
else:
last_word += word
collapsed.append((last_word, last_charset))
return collapsed
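# Illustrative usage sketch (editor's addition, not part of the module),
# mirroring the behaviour documented above:
#
#     >>> decode_header('=?iso-8859-1?q?p=F6stal?= rocks')
#     [(b'p\xf6stal', 'iso-8859-1'), (b'rocks', None)]
#     >>> decode_header('Hello World')
#     [('Hello World', None)]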
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
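# Round-trip sketch (editor's addition): the pairs returned by
# decode_header() feed straight back into make_header(). The helper name
# below is hypothetical and the function is illustrative only.
def _demo_round_trip():
    # Decode an RFC 2047 encoded value, then rebuild a Header from it.
    pairs = decode_header('=?utf-8?q?Caf=C3=A9?= menu')
    return str(make_header(pairs))  # expected: 'Café menu'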
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
The maximum line length can be specified explicitly via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 78 as recommended
by RFC 2822.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
elif not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
self._maxlinelen = maxlinelen
if header_name is None:
self._headerlen = 0
else:
# Take the separating colon and space into account.
self._headerlen = len(header_name) + 2
def __str__(self):
"""Return the string value of the header."""
self._normalize()
uchunks = []
lastcs = None
for string, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if nextcs == _charset.UNKNOWN8BIT:
original_bytes = string.encode('ascii', 'surrogateescape')
string = original_bytes.decode('ascii', 'replace')
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(SPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(SPACE)
lastcs = nextcs
uchunks.append(string)
return EMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a unicode (of the unencoded header value), swap the
# args and do another comparison.
return other == str(self)
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is false), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In either case, when producing an RFC 2822 compliant
header using RFC 2047 rules, the string will be encoded using the
output codec of the charset. If the string cannot be encoded to the
output codec, a UnicodeError will be raised.
Optional `errors' is passed as the errors argument to the decode
call if s is a byte string.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
if not isinstance(s, str):
input_charset = charset.input_codec or 'us-ascii'
if input_charset == _charset.UNKNOWN8BIT:
s = s.decode('us-ascii', 'surrogateescape')
else:
s = s.decode(input_charset, errors)
# Ensure that the bytes we're storing can be decoded to the output
        # character set, otherwise an early error is raised.
output_charset = charset.output_codec or 'us-ascii'
if output_charset != _charset.UNKNOWN8BIT:
s.encode(output_charset, errors)
self._chunks.append((s, charset))
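    # Sketch (editor's addition): chunks appended in different charsets are
    # encoded independently when the header is flattened, e.g.
    #
    #     h = Header('Hello', charset='us-ascii')
    #     h.append('Grüß', charset='utf-8')
    #     h.encode()  # expected: 'Hello =?utf-8?b?R3LDvMOf?='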
def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
r"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
Optional maxlinelen specifies the maximum length of each generated
line, exclusive of the linesep string. Individual lines may be longer
than maxlinelen if a folding point cannot be found. The first line
will be shorter by the length of the header name plus ": " if a header
name was specified at Header construction time. The default value for
maxlinelen is determined at header construction time.
Optional splitchars is a string containing characters which should be
given extra weight by the splitting algorithm during normal header
wrapping. This is in very rough support of RFC 2822's `higher level
syntactic breaks': split points preceded by a splitchar are preferred
during line splitting, with the characters preferred in the order in
which they appear in the string. Space and tab may be included in the
string to indicate whether preference should be given to one over the
other as a split point when other split chars do not appear in the line
being split. Splitchars does not affect RFC 2047 encoded lines.
Optional linesep is a string to be used to separate the lines of
the value. The default value is the most useful for typical
Python applications, but it can be set to \r\n to produce RFC-compliant
line separators when needed.
"""
self._normalize()
if maxlinelen is None:
maxlinelen = self._maxlinelen
# A maxlinelen of 0 means don't wrap. For all practical purposes,
# choosing a huge number here accomplishes that and makes the
# _ValueFormatter algorithm much simpler.
if maxlinelen == 0:
maxlinelen = 1000000
formatter = _ValueFormatter(self._headerlen, maxlinelen,
self._continuation_ws, splitchars)
for string, charset in self._chunks:
lines = string.splitlines()
if lines:
formatter.feed('', lines[0], charset)
else:
formatter.feed('', '', charset)
for line in lines[1:]:
formatter.newline()
if charset.header_encoding is not None:
formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
charset)
else:
sline = line.lstrip()
fws = line[:len(line)-len(sline)]
formatter.feed(fws, sline, charset)
if len(lines) > 1:
formatter.newline()
formatter.add_transition()
value = formatter._str(linesep)
        if _embedded_header.search(value):
raise HeaderParseError("header value appears to contain "
"an embedded header: {!r}".format(value))
return value
def _normalize(self):
# Step 1: Normalize the chunks so that all runs of identical charsets
# get collapsed into a single unicode string.
chunks = []
last_charset = None
last_chunk = []
for string, charset in self._chunks:
if charset == last_charset:
last_chunk.append(string)
else:
if last_charset is not None:
chunks.append((SPACE.join(last_chunk), last_charset))
last_chunk = [string]
last_charset = charset
if last_chunk:
chunks.append((SPACE.join(last_chunk), last_charset))
self._chunks = chunks
class _ValueFormatter:
def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
self._maxlen = maxlen
self._continuation_ws = continuation_ws
self._continuation_ws_len = len(continuation_ws)
self._splitchars = splitchars
self._lines = []
self._current_line = _Accumulator(headerlen)
def _str(self, linesep):
self.newline()
return linesep.join(self._lines)
def __str__(self):
return self._str(NL)
def newline(self):
end_of_line = self._current_line.pop()
if end_of_line != (' ', ''):
self._current_line.push(*end_of_line)
if len(self._current_line) > 0:
if self._current_line.is_onlyws():
self._lines[-1] += str(self._current_line)
else:
self._lines.append(str(self._current_line))
self._current_line.reset()
def add_transition(self):
self._current_line.push(' ', '')
def feed(self, fws, string, charset):
# If the charset has no header encoding (i.e. it is an ASCII encoding)
# then we must split the header at the "highest level syntactic break"
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace. Eventually, this should be pluggable.
if charset.header_encoding is None:
self._ascii_split(fws, string, self._splitchars)
return
# Otherwise, we're doing either a Base64 or a quoted-printable
# encoding which means we don't need to split the line on syntactic
# breaks. We can basically just find enough characters to fit on the
# current line, minus the RFC 2047 chrome. What makes this trickier
# though is that we have to split at octet boundaries, not character
# boundaries but it's only safe to split at character boundaries so at
# best we can only get close.
encoded_lines = charset.header_encode_lines(string, self._maxlengths())
# The first element extends the current line, but if it's None then
        # nothing more fits on the current line, so start a new line.
try:
first_line = encoded_lines.pop(0)
except IndexError:
# There are no encoded lines, so we're done.
return
if first_line is not None:
self._append_chunk(fws, first_line)
try:
last_line = encoded_lines.pop()
except IndexError:
# There was only one line.
return
self.newline()
self._current_line.push(self._continuation_ws, last_line)
# Everything else are full lines in themselves.
for line in encoded_lines:
self._lines.append(self._continuation_ws + line)
def _maxlengths(self):
# The first line's length.
yield self._maxlen - len(self._current_line)
while True:
yield self._maxlen - self._continuation_ws_len
def _ascii_split(self, fws, string, splitchars):
# The RFC 2822 header folding algorithm is simple in principle but
# complex in practice. Lines may be folded any place where "folding
# white space" appears by inserting a linesep character in front of the
# FWS. The complication is that not all spaces or tabs qualify as FWS,
# and we are also supposed to prefer to break at "higher level
# syntactic breaks". We can't do either of these without intimate
# knowledge of the structure of structured headers, which we don't have
# here. So the best we can do here is prefer to break at the specified
# splitchars, and hope that we don't choose any spaces or tabs that
# aren't legal FWS. (This is at least better than the old algorithm,
# where we would sometimes *introduce* FWS after a splitchar, or the
# algorithm before that, where we would turn all white space runs into
# single spaces or tabs.)
parts = re.split("(["+FWS+"]+)", fws+string)
if parts[0]:
parts[:0] = ['']
else:
parts.pop(0)
for fws, part in zip(*[iter(parts)]*2):
self._append_chunk(fws, part)
def _append_chunk(self, fws, string):
self._current_line.push(fws, string)
if len(self._current_line) > self._maxlen:
# Find the best split point, working backward from the end.
# There might be none, on a long first line.
for ch in self._splitchars:
for i in range(self._current_line.part_count()-1, 0, -1):
if ch.isspace():
fws = self._current_line[i][0]
if fws and fws[0]==ch:
break
prevpart = self._current_line[i-1][1]
if prevpart and prevpart[-1]==ch:
break
else:
continue
break
else:
fws, part = self._current_line.pop()
if self._current_line._initial_size > 0:
# There will be a header, so leave it on a line by itself.
self.newline()
if not fws:
# We don't use continuation_ws here because the whitespace
# after a header should always be a space.
fws = ' '
self._current_line.push(fws, part)
return
remainder = self._current_line.pop_from(i)
self._lines.append(str(self._current_line))
self._current_line.reset(remainder)
class _Accumulator(list):
def __init__(self, initial_size=0):
self._initial_size = initial_size
super().__init__()
def push(self, fws, string):
self.append((fws, string))
def pop_from(self, i=0):
popped = self[i:]
self[i:] = []
return popped
def pop(self):
if self.part_count()==0:
return ('', '')
return super().pop()
def __len__(self):
return sum((len(fws)+len(part) for fws, part in self),
self._initial_size)
def __str__(self):
return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
for fws, part in self))
def reset(self, startval=None):
if startval is None:
startval = []
self[:] = startval
self._initial_size = 0
def is_onlyws(self):
return self._initial_size==0 and (not self or str(self).isspace())
def part_count(self):
return super().__len__()
| gpl-2.0 |
DDEFISHER/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/pipes.py | 23 | 14147 | from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
rv = "".join(item for item in response.iter_content(read_file=True))
if type(rv) == unicode:
rv = rv.encode(response.encoding)
return rv
class Pipeline(object):
pipes = {}
def __init__(self, pipe_string):
self.pipe_functions = self.parse(pipe_string)
def parse(self, pipe_string):
functions = []
for item in PipeTokenizer().tokenize(pipe_string):
if not item:
break
if item[0] == "function":
functions.append((self.pipes[item[1]], []))
elif item[0] == "argument":
functions[-1][1].append(item[1])
return functions
def __call__(self, request, response):
for func, args in self.pipe_functions:
response = func(request, response, *args)
return response
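# Illustrative sketch (editor's addition): a pipe string such as
# "status(404)|header(Content-Type,text/plain)" parses into ordered
# (function, args) pairs, each applied to the response in turn:
#
#     pipeline = Pipeline("status(404)|header(Content-Type,text/plain)")
#     response = pipeline(request, response)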
class PipeTokenizer(object):
def __init__(self):
#This whole class can likely be replaced by some regexps
self.state = None
def tokenize(self, string):
self.string = string
self.state = self.func_name_state
self._index = 0
while self.state:
yield self.state()
yield None
def get_char(self):
if self._index >= len(self.string):
return None
rv = self.string[self._index]
self._index += 1
return rv
def func_name_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
if rv:
return ("function", rv)
else:
return None
elif char == "(":
self.state = self.argument_state
return ("function", rv)
elif char == "|":
if rv:
return ("function", rv)
else:
rv += char
def argument_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
return ("argument", rv)
            elif char == "\\":
                escaped = self.get_escape()
                if escaped is None:
                    #This should perhaps be an error instead
                    return ("argument", rv)
                rv += escaped
elif char == ",":
return ("argument", rv)
elif char == ")":
self.state = self.func_name_state
return ("argument", rv)
else:
rv += char
def get_escape(self):
char = self.get_char()
escapes = {"n": "\n",
"r": "\r",
"t": "\t"}
return escapes.get(char, char)
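    # Sketch (editor's addition): tokenizing "slice(1,200)" yields
    # ('function', 'slice'), ('argument', '1'), ('argument', '200') and then
    # None sentinels, which terminate the Pipeline.parse() loop.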
class pipe(object):
def __init__(self, *arg_converters):
self.arg_converters = arg_converters
self.max_args = len(self.arg_converters)
self.min_args = 0
opt_seen = False
for item in self.arg_converters:
if not opt_seen:
if isinstance(item, opt):
opt_seen = True
else:
self.min_args += 1
else:
if not isinstance(item, opt):
raise ValueError("Non-optional argument cannot follow optional argument")
def __call__(self, f):
def inner(request, response, *args):
if not (self.min_args <= len(args) <= self.max_args):
raise ValueError("Expected between %d and %d args, got %d" %
(self.min_args, self.max_args, len(args)))
arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
return f(request, response, *arg_values)
Pipeline.pipes[f.__name__] = inner
#We actually want the undecorated function in the main namespace
return f
class opt(object):
def __init__(self, f):
self.f = f
def __call__(self, arg):
return self.f(arg)
def nullable(func):
def inner(arg):
if arg.lower() == "null":
return None
else:
return func(arg)
return inner
def boolean(arg):
if arg.lower() in ("true", "1"):
return True
elif arg.lower() in ("false", "0"):
return False
raise ValueError
@pipe(int)
def status(request, response, code):
"""Alter the status code.
:param code: Status code to use for the response."""
response.status = code
return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
"""Set a HTTP header.
Replaces any existing HTTP header of the same name unless
append is set, in which case the header is appended without
replacement.
:param name: Name of the header to set.
:param value: Value to use for the header.
:param append: True if existing headers should not be replaced
"""
if not append:
response.headers.set(name, value)
else:
response.headers.append(name, value)
return response
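# e.g. a request for /document.txt?pipe=header(X-Test,1,True) appends an
# X-Test header instead of replacing an existing one (editor's note: the
# query-string wiring lives in the wptserve request handlers, not here).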
@pipe(str)
def trickle(request, response, delays):
"""Send the response in parts, with time delays.
:param delays: A string of delays and amounts, in bytes, of the
response to send. Each component is separated by
a colon. Amounts in bytes are plain integers, whilst
delays are floats prefixed with a single d e.g.
d1:100:d2
Would cause a 1 second delay, would then send 100 bytes
of the file, and then cause a 2 second delay, before sending
the remainder of the file.
If the last token is of the form rN, instead of sending the
remainder of the file, the previous N instructions will be
repeated until the whole file has been sent e.g.
d1:100:d2:r2
Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
and then a further 100 bytes followed by a two second delay
until the response has been fully sent.
"""
def parse_delays():
parts = delays.split(":")
rv = []
for item in parts:
if item.startswith("d"):
item_type = "delay"
item = item[1:]
value = float(item)
elif item.startswith("r"):
item_type = "repeat"
value = int(item[1:])
if not value % 2 == 0:
raise ValueError
else:
item_type = "bytes"
value = int(item)
if len(rv) and rv[-1][0] == item_type:
rv[-1][1] += value
else:
rv.append((item_type, value))
return rv
delays = parse_delays()
if not delays:
return response
content = resolve_content(response)
offset = [0]
def add_content(delays, repeat=False):
for i, (item_type, value) in enumerate(delays):
if item_type == "bytes":
yield content[offset[0]:offset[0] + value]
offset[0] += value
elif item_type == "delay":
time.sleep(value)
elif item_type == "repeat":
if i != len(delays) - 1:
continue
while offset[0] < len(content):
for item in add_content(delays[-(value + 1):-1], True):
yield item
if not repeat and offset[0] < len(content):
yield content[offset[0]:]
response.content = add_content(delays)
return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
"""Send a byte range of the response body
:param start: The starting offset. Follows python semantics including
negative numbers.
:param end: The ending offset, again with python semantics and None
(spelled "null" in a query string) to indicate the end of
the file.
"""
content = resolve_content(response)
response.content = content[start:end]
return response
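# e.g. "slice(10,null)" serves the response body from byte offset 10 to the
# end, while "slice(null,100)" serves only the first 100 bytes.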
class ReplacementTokenizer(object):
def ident(scanner, token):
return ("ident", token)
def index(scanner, token):
token = token[1:-1]
try:
token = int(token)
except ValueError:
token = unicode(token, "utf8")
return ("index", token)
def var(scanner, token):
token = token[:-1]
return ("var", token)
def tokenize(self, string):
return self.scanner.scan(string)[0]
scanner = re.Scanner([(r"\$\w+:", var),
(r"\$?\w+(?:\(\))?", ident),
(r"\[[^\]]*\]", index)])
class FirstWrapper(object):
def __init__(self, params):
self.params = params
def __getitem__(self, key):
try:
return self.params.first(key)
except KeyError:
return ""
@pipe(opt(nullable(str)))
def sub(request, response, escape_type="html"):
"""Substitute environment information about the server and request into the script.
:param escape_type: String detailing the type of escaping to use. Known values are
"html" and "none", with "html" the default for historic reasons.
The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several available substitutions:
host
A simple string value and represents the primary host from which the
tests are being run.
domains
A dictionary of available domains indexed by subdomain name.
ports
A dictionary of lists of ports indexed by protocol.
location
A dictionary of parts of the request URL. Valid keys are
      'server', 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
'server' is scheme://host:port, 'host' is hostname:port, and query
includes the leading '?', but other delimiters are omitted.
headers
A dictionary of HTTP headers in the request.
GET
A dictionary of query parameters supplied with the request.
uuid()
      A pseudo-random UUID suitable for use with the stash
So for example in a setup running on localhost with a www
subdomain and a http server on ports 80 and 81::
{{host}} => localhost
{{domains[www]}} => www.localhost
{{ports[http][1]}} => 81
It is also possible to assign a value to a variable name, which must start with
the $ character, using the ":" syntax e.g.
    {{$id:uuid()}}
Later substitutions in the same file may then refer to the variable
by name e.g.
{{$id}}
"""
content = resolve_content(response)
new_content = template(request, content, escape_type=escape_type)
response.content = new_content
return response
def template(request, content, escape_type="html"):
#TODO: There basically isn't any error handling here
tokenizer = ReplacementTokenizer()
variables = {}
def config_replacement(match):
content, = match.groups()
tokens = tokenizer.tokenize(content)
if tokens[0][0] == "var":
variable = tokens[0][1]
tokens = tokens[1:]
else:
variable = None
assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
field = tokens[0][1]
if field in variables:
value = variables[field]
elif field == "headers":
value = request.headers
elif field == "GET":
value = FirstWrapper(request.GET)
elif field in request.server.config:
value = request.server.config[tokens[0][1]]
elif field == "location":
value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
request.url_parts.hostname,
request.url_parts.port),
"scheme": request.url_parts.scheme,
"host": "%s:%s" % (request.url_parts.hostname,
request.url_parts.port),
"hostname": request.url_parts.hostname,
"port": request.url_parts.port,
"path": request.url_parts.path,
"query": "?%s" % request.url_parts.query}
elif field == "uuid()":
value = str(uuid.uuid4())
elif field == "url_base":
value = request.url_base
else:
raise Exception("Undefined template variable %s" % field)
for item in tokens[1:]:
value = value[item[1]]
assert isinstance(value, (int,) + types.StringTypes), tokens
if variable is not None:
variables[variable] = value
escape_func = {"html": lambda x:escape(x, quote=True),
"none": lambda x:x}[escape_type]
#Should possibly support escaping for other contexts e.g. script
#TODO: read the encoding of the response
return escape_func(unicode(value)).encode("utf-8")
template_regexp = re.compile(r"{{([^}]*)}}")
new_content, count = template_regexp.subn(config_replacement, content)
return new_content
@pipe()
def gzip(request, response):
"""This pipe gzip-encodes response data.
It sets (or overwrites) these HTTP headers:
Content-Encoding is set to gzip
Content-Length is set to the length of the compressed content
"""
content = resolve_content(response)
response.headers.set("Content-Encoding", "gzip")
out = StringIO()
with gzip_module.GzipFile(fileobj=out, mode="w") as f:
f.write(content)
response.content = out.getvalue()
response.headers.set("Content-Length", len(response.content))
return response
| mpl-2.0 |
joone/chromium-crosswalk | third_party/android_platform/development/scripts/symbol.py | 14 | 19655 | #!/usr/bin/python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for looking up symbolic debugging information.
The information can include symbol names, offsets, and source locations.
"""
import glob
import itertools
import logging
import os
import re
import struct
import subprocess
import sys
import zipfile
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir,
'build', 'android'))
from pylib import constants
from pylib.symbols import elf_symbolizer
CHROME_SRC = constants.DIR_SOURCE_ROOT
ANDROID_BUILD_TOP = CHROME_SRC
SYMBOLS_DIR = CHROME_SRC
CHROME_SYMBOLS_DIR = None
ARCH = "arm"
TOOLCHAIN_INFO = None
# See:
# http://bugs.python.org/issue14315
# https://hg.python.org/cpython/rev/6dd5e9556a60#l2.8
def PatchZipFile():
oldDecodeExtra = zipfile.ZipInfo._decodeExtra
def decodeExtra(self):
try:
oldDecodeExtra(self)
except struct.error:
pass
zipfile.ZipInfo._decodeExtra = decodeExtra
PatchZipFile()
def Uname():
"""'uname' for constructing prebuilt/<...> and out/host/<...> paths."""
uname = os.uname()[0]
if uname == "Darwin":
proc = os.uname()[-1]
if proc == "i386" or proc == "x86_64":
return "darwin-x86"
return "darwin-ppc"
if uname == "Linux":
return "linux-x86"
return uname
def ToolPath(tool, toolchain_info=None):
"""Return a full qualified path to the specified tool"""
# ToolPath looks for the tools in the completely incorrect directory.
# This looks in the checked in android_tools.
if ARCH == "arm":
toolchain_source = "arm-linux-androideabi-4.9"
toolchain_prefix = "arm-linux-androideabi"
ndk = "ndk"
elif ARCH == "arm64":
toolchain_source = "aarch64-linux-android-4.9"
toolchain_prefix = "aarch64-linux-android"
ndk = "ndk"
elif ARCH == "x86":
toolchain_source = "x86-4.9"
toolchain_prefix = "i686-linux-android"
ndk = "ndk"
elif ARCH == "x86_64" or ARCH == "x64":
toolchain_source = "x86_64-4.9"
toolchain_prefix = "x86_64-linux-android"
ndk = "ndk"
elif ARCH == "mips":
toolchain_source = "mipsel-linux-android-4.9"
toolchain_prefix = "mipsel-linux-android"
ndk = "ndk"
else:
raise Exception("Could not find tool chain")
toolchain_subdir = (
"third_party/android_tools/%s/toolchains/%s/prebuilt/linux-x86_64/bin" %
(ndk, toolchain_source))
return os.path.join(CHROME_SRC,
toolchain_subdir,
toolchain_prefix + "-" + tool)
def FindToolchain():
"""Look for the latest available toolchain
Args:
None
Returns:
A pair of strings containing toolchain label and target prefix.
"""
global TOOLCHAIN_INFO
if TOOLCHAIN_INFO is not None:
return TOOLCHAIN_INFO
## Known toolchains, newer ones in the front.
gcc_version = "4.9"
if ARCH == "arm64":
known_toolchains = [
("aarch64-linux-android-" + gcc_version, "aarch64", "aarch64-linux-android")
]
elif ARCH == "arm":
known_toolchains = [
("arm-linux-androideabi-" + gcc_version, "arm", "arm-linux-androideabi")
]
elif ARCH =="x86":
known_toolchains = [
("x86-" + gcc_version, "x86", "i686-linux-android")
]
elif ARCH =="x86_64" or ARCH =="x64":
known_toolchains = [
("x86_64-" + gcc_version, "x86_64", "x86_64-linux-android")
]
elif ARCH == "mips":
known_toolchains = [
("mipsel-linux-android-" + gcc_version, "mips", "mipsel-linux-android")
]
else:
known_toolchains = []
  logging.debug('FindToolchain: known_toolchains=%s' % known_toolchains)
# Look for addr2line to check for valid toolchain path.
for (label, platform, target) in known_toolchains:
    toolchain_info = (label, platform, target)
if os.path.exists(ToolPath("addr2line", toolchain_info)):
TOOLCHAIN_INFO = toolchain_info
print ("Using toolchain from: "
+ os.path.normpath(ToolPath("", TOOLCHAIN_INFO)))
return toolchain_info
raise Exception("Could not find tool chain")
def GetAapt():
"""Returns the path to aapt.
Args:
None
Returns:
the pathname of the 'aapt' executable.
"""
sdk_home = os.path.join('third_party', 'android_tools', 'sdk')
sdk_home = os.environ.get('SDK_HOME', sdk_home)
aapt_exe = glob.glob(os.path.join(sdk_home, 'build-tools', '*', 'aapt'))
if not aapt_exe:
return None
return sorted(aapt_exe, key=os.path.getmtime, reverse=True)[0]
def ApkMatchPackageName(aapt, apk_path, package_name):
"""Returns true the APK's package name matches package_name.
Args:
aapt: pathname for the 'aapt' executable.
apk_path: pathname of the APK file.
package_name: package name to match.
Returns:
True if the package name matches or aapt is None, False otherwise.
"""
if not aapt:
# Allow false positives
return True
aapt_output = subprocess.check_output(
[aapt, 'dump', 'badging', apk_path]).split('\n')
package_name_re = re.compile(r'package: .*name=\'(\S*)\'')
for line in aapt_output:
match = package_name_re.match(line)
if match:
return package_name == match.group(1)
return False
def PathListJoin(prefix_list, suffix_list):
"""Returns each prefix in prefix_list joined with each suffix in suffix list.
Args:
prefix_list: list of path prefixes.
suffix_list: list of path suffixes.
Returns:
List of paths each of which joins a prefix with a suffix.
"""
return [
os.path.join(prefix, suffix)
for suffix in suffix_list for prefix in prefix_list ]
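# e.g. PathListJoin(['/a', '/b'], ['x', 'y']) returns
# ['/a/x', '/b/x', '/a/y', '/b/y'] (the suffix varies slowest).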
def _GetChromeOutputDirCandidates():
"""Returns a list of output directories to look in."""
if os.environ.get('CHROMIUM_OUTPUT_DIR') or os.environ.get('BUILDTYPE'):
return [constants.GetOutDirectory()]
return [constants.GetOutDirectory(build_type='Debug'),
constants.GetOutDirectory(build_type='Release')]
def GetCandidates(dirs, filepart, candidate_fun):
"""Returns a list of candidate filenames, sorted by modification time.
Args:
dirs: a list of the directory part of the pathname.
filepart: the file part of the pathname.
candidate_fun: a function to apply to each candidate, returns a list.
Returns:
A list of candidate files ordered by modification time, newest first.
"""
candidates = PathListJoin(dirs, [filepart])
logging.debug('GetCandidates: prefiltered candidates = %s' % candidates)
candidates = list(
itertools.chain.from_iterable(map(candidate_fun, candidates)))
candidates.sort(key=os.path.getmtime, reverse=True)
return candidates
def GetCandidateApks():
"""Returns a list of APKs which could contain the library.
Args:
None
Returns:
list of APK filename which could contain the library.
"""
dirs = PathListJoin(_GetChromeOutputDirCandidates(), ['apks'])
return GetCandidates(dirs, '*.apk', glob.glob)
def GetCrazyLib(apk_filename):
"""Returns the name of the first crazy library from this APK.
Args:
apk_filename: name of an APK file.
Returns:
Name of the first library which would be crazy loaded from this APK.
"""
zip_file = zipfile.ZipFile(apk_filename, 'r')
for filename in zip_file.namelist():
    match = re.match(r'lib/[^/]*/crazy\.(lib.*[.]so)', filename)
if match:
return match.group(1)
def GetApkFromLibrary(device_library_path):
match = re.match(r'.*/([^/]*)-[0-9]+(\/[^/]*)?\.apk$', device_library_path)
if not match:
return None
return match.group(1)
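# e.g. GetApkFromLibrary('/data/app/com.example.app-1/base.apk') returns
# 'com.example.app'; paths that do not look like installed APKs yield None.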
def GetMatchingApks(package_name):
"""Find any APKs which match the package indicated by the device_apk_name.
Args:
    package_name: package name to match against candidate APKs.
Returns:
A list of APK filenames which could contain the desired library.
"""
return filter(
lambda candidate_apk:
ApkMatchPackageName(GetAapt(), candidate_apk, package_name),
GetCandidateApks())
def MapDeviceApkToLibrary(device_apk_name):
"""Provide a library name which corresponds with device_apk_name.
Args:
device_apk_name: name of the APK on the device.
Returns:
Name of the library which corresponds to that APK.
"""
matching_apks = GetMatchingApks(device_apk_name)
logging.debug('MapDeviceApkToLibrary: matching_apks=%s' % matching_apks)
for matching_apk in matching_apks:
crazy_lib = GetCrazyLib(matching_apk)
if crazy_lib:
return crazy_lib
def GetLibrarySearchPaths():
if CHROME_SYMBOLS_DIR:
return [CHROME_SYMBOLS_DIR]
dirs = _GetChromeOutputDirCandidates()
# GYP places unstripped libraries under out/$BUILDTYPE/lib
# GN places them under out/$BUILDTYPE/lib.unstripped
return PathListJoin(dirs, ['lib.unstripped', 'lib', '.'])
def GetCandidateLibraries(library_name):
"""Returns a list of candidate library filenames.
Args:
library_name: basename of the library to match.
Returns:
A list of matching library filenames for library_name.
"""
candidates = GetCandidates(
GetLibrarySearchPaths(), library_name,
lambda filename: filter(os.path.exists, [filename]))
  # For GN, candidates include both stripped and unstripped libraries. Stripped
# libraries are always newer. Explicitly look for .unstripped and sort them
# ahead.
candidates.sort(key=lambda c: int('unstripped' not in c))
return candidates
def TranslateLibPath(lib):
  # The filename in the stack trace may be an APK name rather than a library
  # name. This happens when the library was loaded directly from inside the
  # APK. If this is the case we try to figure out the library name by looking
  # for a matching APK file and finding the name of the library it contains.
# The name of the APK file on the device is of the form
# <package_name>-<number>.apk. The APK file on the host may have any name
# so we look at the APK badging to see if the package name matches.
apk = GetApkFromLibrary(lib)
if apk is not None:
logging.debug('TranslateLibPath: apk=%s' % apk)
mapping = MapDeviceApkToLibrary(apk)
if mapping:
lib = mapping
# SymbolInformation(lib, addr) receives lib as the path from symbols
# root to the symbols file. This needs to be translated to point to the
# correct .so path. If the user doesn't explicitly specify which directory to
# use, then use the most recently updated one in one of the known directories.
# If the .so is not found somewhere in CHROME_SYMBOLS_DIR, leave it
# untranslated in case it is an Android symbol in SYMBOLS_DIR.
library_name = os.path.basename(lib)
logging.debug('TranslateLibPath: lib=%s library_name=%s' % (lib, library_name))
candidate_libraries = GetCandidateLibraries(library_name)
logging.debug('TranslateLibPath: candidate_libraries=%s' % candidate_libraries)
if not candidate_libraries:
return lib
library_path = os.path.relpath(candidate_libraries[0], SYMBOLS_DIR)
logging.debug('TranslateLibPath: library_path=%s' % library_path)
return '/' + library_path
def SymbolInformation(lib, addr, get_detailed_info):
"""Look up symbol information about an address.
Args:
lib: library (or executable) pathname containing symbols
    addr: string hexadecimal address
Returns:
A list of the form [(source_symbol, source_location,
object_symbol_with_offset)].
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first. The list is
always non-empty, even if no information is available.
Usually you want to display the source_location and
object_symbol_with_offset from the last element in the list.
"""
lib = TranslateLibPath(lib)
info = SymbolInformationForSet(lib, set([addr]), get_detailed_info)
return (info and info.get(addr)) or [(None, None, None)]
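# Illustrative call sketch (editor's addition); the library path and address
# below are hypothetical:
#
#     for source_symbol, source_location, object_symbol_with_offset in (
#         SymbolInformation('/libchrome.so', '0x2d4c1', False)):
#       print source_symbol, source_location, object_symbol_with_offset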
def SymbolInformationForSet(lib, unique_addrs, get_detailed_info):
"""Look up symbol information for a set of addresses from the given library.
Args:
lib: library (or executable) pathname containing symbols
    unique_addrs: set of hexadecimal addresses
Returns:
A dictionary of the form {addr: [(source_symbol, source_location,
object_symbol_with_offset)]} where each address has a list of
associated symbols and locations. The list is always non-empty.
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first. The list is
always non-empty, even if no information is available.
Usually you want to display the source_location and
object_symbol_with_offset from the last element in the list.
"""
if not lib:
return None
addr_to_line = CallAddr2LineForSet(lib, unique_addrs)
if not addr_to_line:
return None
if get_detailed_info:
addr_to_objdump = CallObjdumpForSet(lib, unique_addrs)
if not addr_to_objdump:
return None
else:
addr_to_objdump = dict((addr, ("", 0)) for addr in unique_addrs)
result = {}
for addr in unique_addrs:
source_info = addr_to_line.get(addr)
if not source_info:
source_info = [(None, None)]
if addr in addr_to_objdump:
(object_symbol, object_offset) = addr_to_objdump.get(addr)
object_symbol_with_offset = FormatSymbolWithOffset(object_symbol,
object_offset)
else:
object_symbol_with_offset = None
result[addr] = [(source_symbol, source_location, object_symbol_with_offset)
for (source_symbol, source_location) in source_info]
return result
class MemoizedForSet(object):
def __init__(self, fn):
self.fn = fn
self.cache = {}
def __call__(self, lib, unique_addrs):
lib_cache = self.cache.setdefault(lib, {})
no_cache = filter(lambda x: x not in lib_cache, unique_addrs)
if no_cache:
lib_cache.update((k, None) for k in no_cache)
result = self.fn(lib, no_cache)
if result:
lib_cache.update(result)
return dict((k, lib_cache[k]) for k in unique_addrs if lib_cache[k])
@MemoizedForSet
def CallAddr2LineForSet(lib, unique_addrs):
"""Look up line and symbol information for a set of addresses.
Args:
lib: library (or executable) pathname containing symbols
    unique_addrs: set of string hexadecimal addresses to look up.
Returns:
A dictionary of the form {addr: [(symbol, file:line)]} where
each address has a list of associated symbols and locations
or an empty list if no symbol information was found.
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first.
"""
if not lib:
return None
symbols = SYMBOLS_DIR + lib
if not os.path.splitext(symbols)[1] in ['', '.so', '.apk']:
return None
if not os.path.isfile(symbols):
return None
addrs = sorted(unique_addrs)
result = {}
def _Callback(sym, addr):
records = []
while sym: # Traverse all the inlines following the |inlined_by| chain.
if sym.source_path and sym.source_line:
location = '%s:%d' % (sym.source_path, sym.source_line)
else:
location = None
records += [(sym.name, location)]
sym = sym.inlined_by
result[addr] = records
(label, platform, target) = FindToolchain()
symbolizer = elf_symbolizer.ELFSymbolizer(
elf_file_path=symbols,
addr2line_path=ToolPath("addr2line"),
callback=_Callback,
inlines=True)
for addr in addrs:
symbolizer.SymbolizeAsync(int(addr, 16), addr)
symbolizer.Join()
return result
def StripPC(addr):
"""Strips the Thumb bit a program counter address when appropriate.
Args:
addr: the program counter address
Returns:
The stripped program counter address.
"""
global ARCH
if ARCH == "arm":
return addr & ~1
return addr
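# e.g. with ARCH == "arm", StripPC(0x2d4c1) returns 0x2d4c0 (the Thumb bit
# is cleared); for other architectures the address is returned unchanged.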
@MemoizedForSet
def CallObjdumpForSet(lib, unique_addrs):
"""Use objdump to find out the names of the containing functions.
Args:
lib: library (or executable) pathname containing symbols
    unique_addrs: set of string hexadecimal addresses to find the functions for.
Returns:
A dictionary of the form {addr: (string symbol, offset)}.
"""
if not lib:
return None
symbols = SYMBOLS_DIR + lib
if not os.path.exists(symbols):
return None
result = {}
# Function lines look like:
# 000177b0 <android::IBinder::~IBinder()+0x2c>:
# We pull out the address and function first. Then we check for an optional
# offset. This is tricky due to functions that look like "operator+(..)+0x2c"
  func_regexp = re.compile(r"(^[a-f0-9]*) \<(.*)\>:$")
  offset_regexp = re.compile(r"(.*)\+0x([a-f0-9]*)")
# A disassembly line looks like:
# 177b2: b510 push {r4, lr}
  asm_regexp = re.compile(r"(^[ a-f0-9]*):[ a-f0-9]*.*$")
for target_addr in unique_addrs:
start_addr_dec = str(StripPC(int(target_addr, 16)))
stop_addr_dec = str(StripPC(int(target_addr, 16)) + 8)
cmd = [ToolPath("objdump"),
"--section=.text",
"--demangle",
"--disassemble",
"--start-address=" + start_addr_dec,
"--stop-address=" + stop_addr_dec,
symbols]
current_symbol = None # The current function symbol in the disassembly.
current_symbol_addr = 0 # The address of the current function.
stream = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
for line in stream:
# Is it a function line like:
# 000177b0 <android::IBinder::~IBinder()>:
components = func_regexp.match(line)
if components:
# This is a new function, so record the current function and its address.
current_symbol_addr = int(components.group(1), 16)
current_symbol = components.group(2)
# Does it have an optional offset like: "foo(..)+0x2c"?
components = offset_regexp.match(current_symbol)
if components:
current_symbol = components.group(1)
offset = components.group(2)
if offset:
current_symbol_addr -= int(offset, 16)
      # Is it a disassembly line like:
# 177b2: b510 push {r4, lr}
components = asm_regexp.match(line)
if components:
addr = components.group(1)
i_addr = int(addr, 16)
i_target = StripPC(int(target_addr, 16))
if i_addr == i_target:
result[target_addr] = (current_symbol, i_target - current_symbol_addr)
stream.close()
return result
def CallCppFilt(mangled_symbol):
cmd = [ToolPath("c++filt")]
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(mangled_symbol)
process.stdin.write("\n")
process.stdin.close()
demangled_symbol = process.stdout.readline().strip()
process.stdout.close()
return demangled_symbol
def FormatSymbolWithOffset(symbol, offset):
if offset == 0:
return symbol
return "%s+%d" % (symbol, offset)
| bsd-3-clause |