desc (string, 3–26.7k chars) | decl (string, 11–7.89k chars) | bodies (string, 8–553k chars)
---|---|---
'Encode a GNTP Registration Message
@return: GNTP Registration Message ready to be sent'
| def encode(self):
| self.validate()
SEP = u': '
EOL = u'\r\n'
message = (self.format_info() + EOL)
for (k, v) in self.headers.iteritems():
message += (((k.encode('utf8') + SEP) + str(v).encode('utf8')) + EOL)
if (len(self.notifications) > 0):
for notice in self.notifications:
message += EOL
for (k, v) in notice.iteritems():
message += (((k.encode('utf8') + SEP) + str(v).encode('utf8')) + EOL)
message += EOL
return message
|
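A minimal sketch of the wire format that `encode()` above produces: `Header: value` lines joined by CRLF, each notification block preceded by a blank line, and a trailing blank line. The request line and header values here are illustrative assumptions, not taken from the library.

```python
# Sketch of the GNTP registration wire format built by encode().
# The request line and header values below are made up for illustration.
SEP, EOL = ': ', '\r\n'
info_line = 'GNTP/1.0 REGISTER NONE'  # assumed format_info() output
headers = {'Application-Name': 'Example App', 'Notifications-Count': 1}
notifications = [{'Notification-Name': 'Build Done', 'Notification-Enabled': 'True'}]

message = info_line + EOL
for k, v in headers.items():
    message += k + SEP + str(v) + EOL
for notice in notifications:
    message += EOL  # blank line before each notification block
    for k, v in notice.items():
        message += k + SEP + str(v) + EOL
message += EOL
print(message)
```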
'@param data: (Optional) See decode()
@param app: (Optional) Set Application-Name
@param name: (Optional) Set Notification-Name
@param title: (Optional) Set Notification Title
@param password: (Optional) Password to use while encoding/decoding messages'
| def __init__(self, data=None, app=None, name=None, title=None, password=None):
| _GNTPBase.__init__(self, 'NOTIFY')
self.resources = {}
self.requiredHeaders = ['Application-Name', 'Notification-Name', 'Notification-Title']
if data:
self.decode(data, password)
else:
self.set_password(password)
if app:
self.headers['Application-Name'] = app
if name:
self.headers['Notification-Name'] = name
if title:
self.headers['Notification-Title'] = title
self.add_origin_info()
|
'Decode existing GNTP Notification message
@param data: Message to decode.'
| def decode(self, data, password):
| self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.validate_password(password)
self.headers = self.parse_dict(parts[0])
for (i, part) in enumerate(parts):
if (i == 0):
continue
if (part.strip() == ''):
continue
notice = self.parse_dict(part)
if notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
self.resources[notice.get('Identifier')] = notice
|
'Encode a GNTP Notification Message
@return: GNTP Notification Message ready to be sent'
| def encode(self):
| self.validate()
SEP = u': '
EOL = u'\r\n'
message = (self.format_info() + EOL)
for (k, v) in self.headers.iteritems():
message += (((k + SEP) + unicode(v)) + EOL)
message += EOL
return message.encode('utf-8')
|
'@param data: (Optional) See _GNTPResponse.decode()
@param action: (Optional) Set type of action the OK Response is for'
| def __init__(self, data=None, action=None):
| _GNTPBase.__init__(self, '-OK')
self.requiredHeaders = ['Response-Action']
if data:
self.decode(data)
if action:
self.headers['Response-Action'] = action
self.add_origin_info()
|
'@param data: (Optional) See _GNTPResponse.decode()
@param errorcode: (Optional) Error code
@param errordesc: (Optional) Error Description'
| def __init__(self, data=None, errorcode=None, errordesc=None):
| _GNTPBase.__init__(self, '-ERROR')
self.requiredHeaders = ['Error-Code', 'Error-Description']
if data:
self.decode(data)
if errorcode:
self.headers['Error-Code'] = errorcode
self.headers['Error-Description'] = errordesc
self.add_origin_info()
|
'Return a list of all time zones known to the system.'
| def list():
| handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzkey = _winreg.OpenKey(handle, TZKEYNAME)
result = [_winreg.EnumKey(tzkey, i) for i in range(_winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
|
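The `list()` method above enumerates the subkeys of the Windows time-zone registry key. A Windows-only sketch of the same pattern, assuming `TZKEYNAME` is the standard NT registry location:

```python
# Windows-only sketch: enumerate time-zone names from the registry,
# following the same pattern as list() above. TZKEYNAME is assumed to be
# the conventional Windows NT key path.
import winreg  # _winreg in Python 2

TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones'

with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
    with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
        count = winreg.QueryInfoKey(tzkey)[0]  # number of subkeys
        zones = [winreg.EnumKey(tzkey, i) for i in range(count)]
print(zones[:5])
```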
'Create a FieldArray of <length> fields of class <elements_class>,
named "<name>[x]". The **elements_extra_args will be passed to the
constructor of each field when yielded.'
| def __init__(self, parent, name, elements_class, length, **elements_extra_args):
| FieldSet.__init__(self, parent, name)
self.array_elements_class = elements_class
self.array_length = length
self.array_elements_extra_args = elements_extra_args
|
'Initialize a CPIndex.
- target_types is the tuple of expected types for the target CPInfo
(if None, then there will be no type check)
- target_text_handler is a string transformation function used for
pretty-printing the target str() result
- allow_zero states whether a null index is allowed (sometimes a constant
pool index is optional)'
| def __init__(self, parent, name, description=None, target_types=None, target_text_handler=(lambda x: x), allow_zero=False):
| UInt16.__init__(self, parent, name, description)
if isinstance(target_types, str):
self.target_types = (target_types,)
else:
self.target_types = target_types
self.allow_zero = allow_zero
self.target_text_handler = target_text_handler
self.getOriginalDisplay = (lambda : self.value)
|
'Returns the target CPInfo field.'
| def get_cp_entry(self):
| assert (self.value < self['/constant_pool_count'].value)
if (self.allow_zero and (not self.value)):
return None
cp_entry = self[('/constant_pool/constant_pool[%d]' % self.value)]
assert isinstance(cp_entry, CPInfo)
if self.target_types:
assert (cp_entry.constant_type in self.target_types)
return cp_entry
|
'Returns a human-readable string representation of the constant pool
entry. It is used for pretty-printing of the CPIndex fields pointing
to it.'
| def __str__(self):
| if (self.constant_type == 'Utf8'):
return self['bytes'].value
elif (self.constant_type in ('Integer', 'Float', 'Long', 'Double')):
return self['bytes'].display
elif (self.constant_type == 'Class'):
class_name = str(self['name_index'].get_cp_entry())
return class_name.replace('/', '.')
elif (self.constant_type == 'String'):
return str(self['string_index'].get_cp_entry())
elif (self.constant_type in ('Fieldref', 'Methodref', 'InterfaceMethodref')):
return ('%s (from %s)' % (self['name_and_type_index'], self['class_index']))
elif (self.constant_type == 'NameAndType'):
return parse_any_descriptor(str(self['descriptor_index'].get_cp_entry()), name=str(self['name_index'].get_cp_entry()))
else:
raise ParserError(('Not a valid constant pool element type: ' + str(self['tag'].value)))
|
'Number of pages which can really be used for swapping:
number of pages, minus bad pages, minus one page (used for the header)'
| def getPageCount(self):
| return ((self['last_page'].value - self['nb_badpage'].value) - 1)
|
'Read integer value (may raise ValueError)'
| def createValue(self):
| return int(self['value'].value)
|
'Create a Unicode description'
| def createDescription(self):
| return self.PARSER_TAGS['description']
|
'Create MIME type (string), e.g. "image/png".
If it returns None, "application/octet-stream" is used.'
| def createMimeType(self):
| if ('mime' in self.PARSER_TAGS):
return self.PARSER_TAGS['mime'][0]
return None
|
'Check that the parser is able to parse the stream. Valid results:
- True: stream looks valid;
- False: stream is invalid;
- str: string describing the error.'
| def validate(self):
| raise NotImplementedError()
|
'Create filename suffix: "." + first value of self.PARSER_TAGS["file_ext"],
or None if self.PARSER_TAGS["file_ext"] doesn\'t exist.'
| def createFilenameSuffix(self):
| file_ext = self.getParserTags().get('file_ext')
if isinstance(file_ext, (tuple, list)):
file_ext = file_ext[0]
return (file_ext and ('.' + file_ext))
|
'With dxt2_mode on, this field will always use the four-color model'
| def __init__(self, parent, name, dxt2_mode=False, *args, **kwargs):
| FieldSet.__init__(self, parent, name, *args, **kwargs)
self.dxt2_mode = dxt2_mode
|
'File is in EMF format?'
| def isEMF(self):
| if (1 <= self.current_length):
return (self[0].name == 'emf_header')
if (self.size < (44 * 8)):
return False
magic = EMF_Header.MAGIC
return (self.stream.readBytes((40 * 8), len(magic)) == magic)
|
'File is in Aldus Placeable Metafiles format?'
| def isAPM(self):
| if (1 <= self.current_length):
return (self[0].name == 'amf_header')
else:
magic = PlaceableHeader.MAGIC
return (self.stream.readBytes(0, len(magic)) == magic)
|
'Returns (value_size, array_size): value_size in bits and
array_size in number of items.'
| def getSizes(self):
| self.value_cls = self.ENTRY_FORMAT.get(self['type'].value, Bytes)
count = self['count'].value
if (self.value_cls in (String, Bytes)):
return ((8 * count), 1)
else:
return ((self.value_cls.static_size * count), count)
|
'Seek to byte address relative to parent address.'
| def seek(self, offset):
| padding = (offset - ((self.address + self.current_size) / 8))
if (0 < padding):
return createPaddingField(self, (padding * 8))
else:
return None
|
'Parse what is left of the block'
| def parseBody(self):
| size = (self['block_size'].value - (self.current_size // 8))
if (('has_added_size' in self['flags']) and self['flags/has_added_size'].value):
size += self['added_size'].value
if (size > 0):
(yield RawBytes(self, 'body', size, 'Body data'))
|
'Create modification date as Unicode string, may raise ValueError.'
| def getDatetime(self):
| timestamp = self.getOctal('mtime')
return timestampUNIX(timestamp)
|
'Read sampling rate. Returns None on error.'
| def getSampleRate(self):
| version = self['version'].value
rate = self['sampling_rate'].value
try:
return self.SAMPLING_RATES[version][rate]
except (KeyError, IndexError):
return None
|
'Read bit rate in bit/sec. Returns None on error.'
| def getBitRate(self):
| layer = (3 - self['layer'].value)
bit_rate = self['bit_rate'].value
if (bit_rate in (0, 15)):
return None
if (self['version'].value == 3):
dataset = self.BIT_RATES[1]
else:
dataset = self.BIT_RATES[2]
try:
return (dataset[layer][bit_rate] * 1000)
except (KeyError, IndexError):
return None
|
'Read frame size in bytes. Returns None on error.'
| def getFrameSize(self):
| frame_size = self.getBitRate()
if (not frame_size):
return None
sample_rate = self.getSampleRate()
if (not sample_rate):
return None
padding = int(self['use_padding'].value)
if (self['layer'].value == self.LAYER_III):
if (self['version'].value == self.MPEG_I):
return (((frame_size * 144) // sample_rate) + padding)
else:
return (((frame_size * 72) // sample_rate) + padding)
elif (self['layer'].value == self.LAYER_II):
return (((frame_size * 144) // sample_rate) + padding)
else:
frame_size = ((frame_size * 12) // sample_rate)
return ((frame_size + padding) * 4)
|
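A worked example of the Layer III frame-size arithmetic above (note that `getFrameSize()` reuses the name `frame_size` for the bit rate; the sketch renames it for clarity). The input values are illustrative:

```python
# Worked example of the MPEG-1 Layer III frame-size formula used above,
# with illustrative values: 128 kbit/s at 44.1 kHz, padding bit clear.
bit_rate = 128000      # bits/sec, as getBitRate() would return
sample_rate = 44100    # Hz, as getSampleRate() would return
padding = 0

frame_size = (bit_rate * 144) // sample_rate + padding
print(frame_size)  # 417 bytes (418 when the padding bit is set)
```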
'Guess if frames are constant bit rate. If it returns False, you can
be sure that frames are variable bit rate. Otherwise, it looks like
constant bit rate (based on the first count frames).'
| def looksConstantBitRate(self, count=10):
| check_keys = ('version', 'layer', 'bit_rate')
last_field = None
for (index, field) in enumerate(self.array('frame')):
if last_field:
for key in check_keys:
if (field[key].value != last_field[key].value):
return False
last_field = field
if (index == count):
break
return True
|
'Get bit rate (number of bits per sample per channel);
may return None if it cannot be computed.'
| def getBitsPerSample(self):
| return self.BITS_PER_SAMPLE.get(self['codec'].value)
|
'Display a list of parsers with a title
* out: output file
* title: title of the list to display
* format: "rest", "trac", "file-ext", "mime" or "one_line" (default)'
| def print_(self, title=None, out=None, verbose=False, format='one_line'):
| if (out is None):
out = sys.stdout
if (format in ('file-ext', 'mime')):
extensions = set()
for parser in self:
file_ext = parser.getParserTags().get(format, ())
file_ext = list(file_ext)
try:
file_ext.remove('')
except ValueError:
pass
extensions |= set(file_ext)
extensions -= set(('',))
extensions = list(extensions)
extensions.sort()
text = ', '.join((str(item) for item in extensions))
if (format == 'file-ext'):
print >>out, ('File extensions: %s.' % text)
print >>out
print >>out, ('Total: %s file extensions.' % len(extensions))
else:
print >>out, ('MIME types: %s.' % text)
print >>out
print >>out, ('Total: %s MIME types.' % len(extensions))
return
if (format == 'trac'):
print >>out, '== List of parsers =='
print >>out
print >>out, ('Total: %s parsers' % len(self.parser_list))
print >>out
elif (format == 'one_line'):
if title:
print >>out, title
else:
print >>out, _('List of Hachoir parsers.')
print >>out
bycategory = self.bytag['category']
for category in sorted(bycategory.iterkeys()):
if (format == 'one_line'):
parser_list = [parser.PARSER_TAGS['id'] for parser in bycategory[category]]
parser_list.sort()
print >>out, ('- %s: %s' % (category.title(), ', '.join(parser_list)))
else:
if (format == 'rest'):
print >>out, category.replace('_', ' ').title()
print >>out, ('-' * len(category))
print >>out
elif (format == 'trac'):
print >>out, ('=== %s ===' % category.replace('_', ' ').title())
print >>out
else:
print >>out, ('[%s]' % category)
parser_list = sorted(bycategory[category], key=(lambda parser: parser.PARSER_TAGS['id']))
if (format == 'rest'):
for parser in parser_list:
tags = parser.getParserTags()
print >>out, ('* %s: %s' % (tags['id'], tags['description']))
elif (format == 'trac'):
for parser in parser_list:
tags = parser.getParserTags()
desc = tags['description']
desc = re.sub('([A-Z][a-z]+[A-Z][^ ]+)', '!\\1', desc)
print >>out, (' * %s: %s' % (tags['id'], desc))
else:
for parser in parser_list:
parser.print_(out, verbose)
print >>out
if (format != 'trac'):
print >>out, ('Total: %s parsers' % len(self.parser_list))
|
'Load all parsers from "hachoir.parser" module.
Return the list of loaded parsers.'
| def _load(self):
| if self.parser_list:
return self.parser_list
todo = []
from lib import hachoir_parser
module = hachoir_parser
for attrname in dir(module):
attr = getattr(module, attrname)
if isinstance(attr, types.ModuleType):
todo.append(attr)
for module in todo:
for name in dir(module):
attr = getattr(module, name)
if (isinstance(attr, type) and issubclass(attr, HachoirParser) and (attr not in (Parser, HachoirParser))):
self.add(attr)
assert (1 <= len(self.parser_list))
return self.parser_list
|
'Add an API key (register).'
| def addkey(self, key):
| if (type(key) == str):
if (not (key in self._apikey)):
self._apikey.append(key)
elif (type(key) == list):
for k in key:
if (not (k in self._apikey)):
self._apikey.append(k)
|
'Remove an API key (unregister).'
| def delkey(self, key):
| if (type(key) == str):
if (key in self._apikey):
self._apikey.remove(key)
elif (type(key) == list):
for k in key:
if (k in self._apikey):
self._apikey.remove(k)
|
'Set the developer key (and check that it has the correct length)'
| def developerkey(self, developerkey):
| if ((type(developerkey) == str) and (len(developerkey) == 48)):
self._developerkey = developerkey
|
'Push a message to the registered API keys.
Takes the following arguments:
- (req) application: application name [256]
- (req) event: event name [1024]
- (req) description: description [10000]
- (opt) url: url [512]
- (opt) priority: from -2 (lowest) to 2 (highest) (default: 0)
- (opt) batch_mode: call the API 5 keys at a time (default: False)
Warning: with batch_mode, an error is returned only if all API keys are bad
cf: http://nma.usk.bz/api.php'
| def push(self, application='', event='', description='', url='', priority=0, batch_mode=False):
| datas = {'application': application[:256].encode('utf8'), 'event': event[:1024].encode('utf8'), 'description': description[:10000].encode('utf8'), 'priority': priority}
if url:
datas['url'] = url[:512]
if self._developerkey:
datas['developerkey'] = self._developerkey
results = {}
if (not batch_mode):
for key in self._apikey:
datas['apikey'] = key
res = self.callapi('POST', ADD_PATH, datas)
results[key] = res
else:
for i in range(0, len(self._apikey), 5):
datas['apikey'] = ','.join(self._apikey[i:(i + 5)])
res = self.callapi('POST', ADD_PATH, datas)
results[datas['apikey']] = res
return results
|
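In batch mode, `push()` above groups API keys five at a time into a comma-separated `apikey` field. A small sketch of that grouping slice, with made-up key strings:

```python
# Sketch of the batch_mode grouping above: keys are sent 5 at a time as a
# comma-separated 'apikey' value. The key strings are illustrative.
apikeys = ['k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'k7']

for i in range(0, len(apikeys), 5):
    batch = ','.join(apikeys[i:i + 5])
    print(batch)
# k1,k2,k3,k4,k5
# k6,k7
```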
'Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (\', \', \': \'). To get the most compact JSON
representation you should specify (\',\', \':\') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can\'t otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.'
| def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None):
| self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if (separators is not None):
(self.item_separator, self.key_separator) = separators
if (default is not None):
self.default = default
self.encoding = encoding
|
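The encoder above appears to be simplejson, which ships as Python's standard `json` module; the effect of the `separators` and `indent` options described in the constructor docstring can be seen through that module:

```python
# The separators/indent behaviour described above, demonstrated with the
# standard library json module.
import json

data = {'foo': ['bar', 'baz']}
print(json.dumps(data))                         # {"foo": ["bar", "baz"]}
print(json.dumps(data, separators=(',', ':')))  # {"foo":["bar","baz"]} (most compact)
print(json.dumps(data, indent=2, sort_keys=True))
```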
'Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)'
| def default(self, o):
| raise TypeError((repr(o) + ' is not JSON serializable'))
|
'Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
\'{"foo": ["bar", "baz"]}\''
| def encode(self, o):
| if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if ((_encoding is not None) and (not (_encoding == 'utf-8'))):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
chunks = self.iterencode(o, _one_shot=True)
if (not isinstance(chunks, (list, tuple))):
chunks = list(chunks)
return ''.join(chunks)
|
'Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)'
| def iterencode(self, o, _one_shot=False):
| if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if (self.encoding != 'utf-8'):
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=(- INFINITY)):
if (o != o):
text = 'NaN'
elif (o == _inf):
text = 'Infinity'
elif (o == _neginf):
text = '-Infinity'
else:
return _repr(o)
if (not allow_nan):
raise ValueError(('Out of range float values are not JSON compliant: ' + repr(o)))
return text
if (_one_shot and (c_make_encoder is not None) and (not self.indent) and (not self.sort_keys)):
_iterencode = c_make_encoder(markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot)
return _iterencode(o, 0)
|
'``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.'
| def __init__(self, encoding=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, strict=True):
| self.encoding = encoding
self.object_hook = object_hook
self.parse_float = (parse_float or float)
self.parse_int = (parse_int or int)
self.parse_constant = (parse_constant or _CONSTANTS.__getitem__)
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
|
'Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)'
| def decode(self, s, _w=WHITESPACE.match):
| (obj, end) = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if (end != len(s)):
raise ValueError(errmsg('Extra data', s, end, len(s)))
return obj
|
'Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.'
| def raw_decode(self, s, idx=0):
| try:
(obj, end) = self.scan_once(s, idx)
except StopIteration:
raise ValueError('No JSON object could be decoded')
return (obj, end)
|
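As the docstring above notes, `raw_decode()` can pull one JSON document off the front of a string that has extraneous data at the end; a short example using the standard `json` module, which exposes the same decoder:

```python
# Using raw_decode, as documented above, to decode one JSON document from
# a string with trailing data.
import json

s = '{"a": 1} trailing text'
obj, end = json.JSONDecoder().raw_decode(s)
print(obj)      # {'a': 1}
print(s[end:])  # ' trailing text'
```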
'Aggregate two event values.'
| def aggregate(self, val1, val2):
| assert (val1 is not None)
assert (val2 is not None)
return self._aggregator(val1, val2)
|
'Format an event value.'
| def format(self, val):
| assert (val is not None)
return self._formatter(val)
|
'Validate the edges.'
| def validate(self):
| for function in self.functions.itervalues():
for callee_id in function.calls.keys():
assert (function.calls[callee_id].callee_id == callee_id)
if (callee_id not in self.functions):
sys.stderr.write(('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name)))
del function.calls[callee_id]
|
'Find cycles using Tarjan\'s strongly connected components algorithm.'
| def find_cycles(self):
| visited = set()
for function in self.functions.itervalues():
if (function not in visited):
self._tarjan(function, 0, [], {}, {}, visited)
cycles = []
for function in self.functions.itervalues():
if ((function.cycle is not None) and (function.cycle not in cycles)):
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write('Cycle:\n')
for member in cycle.functions:
sys.stderr.write((' DCTB %s\n' % member.name))
|
'Tarjan\'s strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan\'s_strongly_connected_components_algorithm'
| def _tarjan(self, function, order, stack, orders, lowlinks, visited):
| visited.add(function)
orders[function] = order
lowlinks[function] = order
order += 1
pos = len(stack)
stack.append(function)
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
if (callee not in orders):
order = self._tarjan(callee, order, stack, orders, lowlinks, visited)
lowlinks[function] = min(lowlinks[function], lowlinks[callee])
elif (callee in stack):
lowlinks[function] = min(lowlinks[function], orders[callee])
if (lowlinks[function] == orders[function]):
members = stack[pos:]
del stack[pos:]
if (len(members) > 1):
cycle = Cycle()
for member in members:
cycle.add_function(member)
return order
|
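A minimal, self-contained sketch of Tarjan's strongly connected components algorithm on a plain adjacency-list graph, mirroring the recursive order/lowlink/stack structure of `_tarjan()` above (the graph and node names are made up):

```python
# Minimal Tarjan SCC sketch on an adjacency-list dict, following the same
# structure as _tarjan() above.
def tarjan(graph):
    index, lowlink = {}, {}          # discovery order and low-link values
    stack, on_stack = [], set()
    sccs = []
    counter = [0]

    def visit(v):
        index[v] = lowlink[v] = counter[0]
        counter[0] += 1
        stack.append(v)
        on_stack.add(v)
        for w in graph[v]:
            if w not in index:
                visit(w)
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif w in on_stack:
                lowlink[v] = min(lowlink[v], index[w])
        if lowlink[v] == index[v]:   # v is the root of an SCC
            scc = []
            while True:
                w = stack.pop()
                on_stack.discard(w)
                scc.append(w)
                if w == v:
                    break
            sccs.append(scc)

    for v in graph:
        if v not in index:
            visit(v)
    return sccs

print(tarjan({'a': ['b'], 'b': ['c'], 'c': ['a'], 'd': ['c']}))
# [['c', 'b', 'a'], ['d']]
```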
'Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html'
| def integrate(self, outevent, inevent):
| assert (outevent not in self)
for function in self.functions.itervalues():
assert (outevent not in function)
assert (inevent in function)
for call in function.calls.itervalues():
assert (outevent not in call)
if (call.callee_id != function.id):
assert (CALL_RATIO in call)
for cycle in self.cycles:
total = inevent.null()
for function in self.functions.itervalues():
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
total = inevent.null()
for function in self.functions.itervalues():
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total
|
'Aggregate an event for the whole profile.'
| def aggregate(self, event):
| total = event.null()
for function in self.functions.itervalues():
try:
total = event.aggregate(total, function[event])
except UndefinedEvent:
return
self[event] = total
|
'Prune the profile'
| def prune(self, node_thres, edge_thres):
| for function in self.functions.itervalues():
try:
function[PRUNE_RATIO] = function[TOTAL_TIME_RATIO]
except UndefinedEvent:
pass
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
if (TOTAL_TIME_RATIO in call):
call[PRUNE_RATIO] = call[TOTAL_TIME_RATIO]
else:
try:
call[PRUNE_RATIO] = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
except UndefinedEvent:
pass
for function_id in self.functions.keys():
function = self.functions[function_id]
try:
if (function[PRUNE_RATIO] < node_thres):
del self.functions[function_id]
except UndefinedEvent:
pass
for function in self.functions.itervalues():
for callee_id in function.calls.keys():
call = function.calls[callee_id]
try:
if ((callee_id not in self.functions) or (call[PRUNE_RATIO] < edge_thres)):
del function.calls[callee_id]
except UndefinedEvent:
pass
|
'Extract a structure from a match object, while translating the types in the process.'
| def translate(self, mo):
| attrs = {}
groupdict = mo.groupdict()
for (name, value) in groupdict.iteritems():
if (value is None):
value = None
elif self._int_re.match(value):
value = int(value)
elif self._float_re.match(value):
value = float(value)
attrs[name] = value
return Struct(attrs)
|
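A sketch of the `translate()` pattern above: named regex groups are coerced to `int` or `float` by probing with type-detection regexes before being stored as attributes. The line format and regexes here are illustrative assumptions:

```python
# Sketch of translate(): coerce named regex groups to int/float by
# probing with type-detection regexes. The line format is made up.
import re

_int_re = re.compile(r'^\d+$')
_float_re = re.compile(r'^\d+\.\d+$')

def translate(mo):
    attrs = {}
    for name, value in mo.groupdict().items():
        if value is None:
            pass                      # missing optional group stays None
        elif _int_re.match(value):
            value = int(value)
        elif _float_re.match(value):
            value = float(value)
        attrs[name] = value
    return attrs

line_re = re.compile(r'(?P<calls>\d+)\s+(?P<time>\d+\.\d+)\s+(?P<name>\S+)')
mo = line_re.match('12 0.05 main')
print(translate(mo))  # {'calls': 12, 'time': 0.05, 'name': 'main'}
```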
'Parse the call graph.'
| def parse_cg(self):
| while (not self._cg_header_re.match(self.readline())):
pass
line = self.readline()
while self._cg_header_re.match(line):
line = self.readline()
entry_lines = []
while (line != '\x0c'):
if (line and (not line.isspace())):
if self._cg_sep_re.match(line):
self.parse_cg_entry(entry_lines)
entry_lines = []
else:
entry_lines.append(line)
line = self.readline()
|
'Convert a color from HSL color-model to RGB.
See also:
- http://www.w3.org/TR/css3-color/#hsl-color'
| def hsl_to_rgb(self, h, s, l):
| h = (h % 1.0)
s = min(max(s, 0.0), 1.0)
l = min(max(l, 0.0), 1.0)
if (l <= 0.5):
m2 = (l * (s + 1.0))
else:
m2 = ((l + s) - (l * s))
m1 = ((l * 2.0) - m2)
r = self._hue_to_rgb(m1, m2, (h + (1.0 / 3.0)))
g = self._hue_to_rgb(m1, m2, h)
b = self._hue_to_rgb(m1, m2, (h - (1.0 / 3.0)))
r **= self.gamma
g **= self.gamma
b **= self.gamma
return (r, g, b)
|
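A self-contained version of the CSS3 HSL-to-RGB conversion above, without the gamma step; `_hue_to_rgb` is the helper the class defines elsewhere, reproduced here from the CSS3 specification the docstring cites:

```python
# Self-contained CSS3 HSL->RGB conversion, same algorithm as hsl_to_rgb()
# above minus gamma correction; _hue_to_rgb follows the CSS3 spec.
def _hue_to_rgb(m1, m2, h):
    h = h % 1.0
    if h * 6.0 < 1.0:
        return m1 + (m2 - m1) * h * 6.0
    if h * 2.0 < 1.0:
        return m2
    if h * 3.0 < 2.0:
        return m1 + (m2 - m1) * (2.0 / 3.0 - h) * 6.0
    return m1

def hsl_to_rgb(h, s, l):
    h, s, l = h % 1.0, min(max(s, 0.0), 1.0), min(max(l, 0.0), 1.0)
    m2 = l * (s + 1.0) if l <= 0.5 else l + s - l * s
    m1 = l * 2.0 - m2
    return (_hue_to_rgb(m1, m2, h + 1.0 / 3.0),
            _hue_to_rgb(m1, m2, h),
            _hue_to_rgb(m1, m2, h - 1.0 / 3.0))

print(hsl_to_rgb(0.0, 1.0, 0.5))        # (1.0, 0.0, 0.0) -- pure red
print(hsl_to_rgb(1.0 / 3.0, 1.0, 0.5))  # (0.0, 1.0, 0.0) -- pure green
```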
'Main program.'
| def main(self):
| parser = optparse.OptionParser(usage='\n DCTB %prog [options] [file] ...', version=('%%prog %s' % __version__))
parser.add_option('-o', '--output', metavar='FILE', type='string', dest='output', help='output filename [stdout]')
parser.add_option('-n', '--node-thres', metavar='PERCENTAGE', type='float', dest='node_thres', default=0.5, help='eliminate nodes below this threshold [default: %default]')
parser.add_option('-e', '--edge-thres', metavar='PERCENTAGE', type='float', dest='edge_thres', default=0.1, help='eliminate edges below this threshold [default: %default]')
parser.add_option('-f', '--format', type='choice', choices=('prof', 'oprofile', 'pstats', 'shark'), dest='format', default='prof', help='profile format: prof, oprofile, pstats, or shark [default: %default]')
parser.add_option('-c', '--colormap', type='choice', choices=('color', 'pink', 'gray', 'bw'), dest='theme', default='color', help='color map: color, pink, gray, or bw [default: %default]')
parser.add_option('-s', '--strip', action='store_true', dest='strip', default=False, help='strip function parameters, template parameters, and const modifiers from demangled C++ function names')
parser.add_option('-w', '--wrap', action='store_true', dest='wrap', default=False, help='wrap function names')
(self.options, self.args) = parser.parse_args(sys.argv[1:])
if ((len(self.args) > 1) and (self.options.format != 'pstats')):
parser.error('incorrect number of arguments')
try:
self.theme = self.themes[self.options.theme]
except KeyError:
parser.error(("invalid colormap '%s'" % self.options.theme))
if (self.options.format == 'prof'):
if (not self.args):
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = GprofParser(fp)
elif (self.options.format == 'oprofile'):
if (not self.args):
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = OprofileParser(fp)
elif (self.options.format == 'pstats'):
if (not self.args):
parser.error('at least a file must be specified for pstats input')
parser = PstatsParser(*self.args)
elif (self.options.format == 'shark'):
if (not self.args):
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = SharkParser(fp)
else:
parser.error(("invalid format '%s'" % self.options.format))
self.profile = parser.parse()
if (self.options.output is None):
self.output = sys.stdout
else:
self.output = open(self.options.output, 'wt')
self.write_graph()
|
'Remove extraneous information from C++ demangled function names.'
| def strip_function_name(self, name):
| while True:
(name, n) = self._parenthesis_re.subn('', name)
if (not n):
break
name = self._const_re.sub('', name)
while True:
(name, n) = self._angles_re.subn('', name)
if (not n):
break
return name
|
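A sketch of the `strip_function_name()` loop above: innermost `(...)` and `<...>` groups are deleted repeatedly until `subn()` reports no more substitutions, which unwinds nesting one level per pass. The regexes are assumptions modelled on gprof2dot's, matching bracket pairs with no nested brackets inside:

```python
# Sketch of strip_function_name(): repeatedly delete innermost (...) and
# <...> groups until nothing changes. The regexes are assumed, matching
# bracket pairs that contain no nested brackets.
import re

_parenthesis_re = re.compile(r'\([^()]*\)')
_angles_re = re.compile(r'<[^<>]*>')
_const_re = re.compile(r'\s+const$')

def strip_function_name(name):
    while True:
        name, n = _parenthesis_re.subn('', name)
        if not n:
            break
    name = _const_re.sub('', name)
    while True:
        name, n = _angles_re.subn('', name)
        if not n:
            break
    return name

print(strip_function_name('std::vector<std::pair<int, int>>::push_back(int) const'))
# std::vector::push_back
```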
'Split the function name on multiple lines.'
| def wrap_function_name(self, name):
| if (len(name) > 32):
ratio = (2.0 / 3.0)
height = max(int(((len(name) / (1.0 - ratio)) + 0.5)), 1)
width = max((len(name) / height), 32)
name = textwrap.fill(name, width, break_long_words=False)
name = name.replace(', ', ',')
name = name.replace('> >', '>>')
name = name.replace('> >', '>>')
return name
|
'Compress function name according to the user preferences.'
| def compress_function_name(self, name):
| if self.options.strip:
name = self.strip_function_name(name)
if self.options.wrap:
name = self.wrap_function_name(name)
return name
|
'Checks the auto-correction of show names is working.
It should correct the weirdly capitalised \'sCruBs\' to \'Scrubs\''
| def test_different_case(self):
| self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
|
'Checks shownames with spaces'
| def test_spaces(self):
| self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
|
'Checks numeric show names'
| def test_numeric(self):
| self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')
self.assertEquals(self.t['24']['seriesname'], '24')
|
'Iterating over a show returns each season'
| def test_show_iter(self):
| self.assertEquals(len([season for season in self.t['Life on Mars']]), 2)
|
'Iterating over a season returns episodes'
| def test_season_iter(self):
| self.assertEquals(len([episode for episode in self.t['Life on Mars'][1]]), 8)
|
'Checks episode overview is retrieved correctly.'
| def test_get_episode_overview(self):
| self.assertEquals(self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith('When a new copy of Doral, a Cylon who had been previously'), True)
|
'Check accessing series from episode instance'
| def test_get_parent(self):
| show = self.t['Battlestar Galactica (2003)']
season = show[1]
episode = show[1][1]
self.assertEquals(season.show, show)
self.assertEquals(episode.season, season)
self.assertEquals(episode.season.show, show)
|
'Checks exception is thrown when season doesn\'t exist.'
| def test_seasonnotfound(self):
| self.assertRaises(tvdb_seasonnotfound, (lambda : self.t['CNNNN'][10][1]))
|
'Checks exception is thrown when show doesn\'t exist.'
| def test_shownotfound(self):
| self.assertRaises(tvdb_shownotfound, (lambda : self.t['the fake show thingy']))
|
'Checks exception is raised for non-existent episode'
| def test_episodenotfound(self):
| self.assertRaises(tvdb_episodenotfound, (lambda : self.t['Scrubs'][1][30]))
|
'Checks exception is thrown if an attribute isn\'t found.'
| def test_attributenamenotfound(self):
| self.assertRaises(tvdb_attributenotfound, (lambda : self.t['CNNNN'][1][6]['afakeattributething']))
self.assertRaises(tvdb_attributenotfound, (lambda : self.t['CNNNN']['afakeattributething']))
|
'There should be only one result matching'
| def test_search_len(self):
| self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)
|
'Checks you can get the episode name of a search result'
| def test_search_checkname(self):
| self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')
self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')
|
'Checks search can return multiple results'
| def test_search_multiresults(self):
| self.assertEquals((len(self.t['Scrubs'].search('my first')) >= 3), True)
|
'Checks not supplying search info raises TypeError'
| def test_search_no_params_error(self):
| self.assertRaises(TypeError, (lambda : self.t['Scrubs'].search()))
|
'Checks the searching of a single season'
| def test_search_season(self):
| self.assertEquals(len(self.t['Scrubs'][1].search('First')), 3)
|
'Checks the searching of an entire show'
| def test_search_show(self):
| self.assertEquals(len(self.t['CNNNN'].search('CNNNN', key='episodename')), 3)
|
'Tests airedOn show method'
| def test_aired_on(self):
| sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2))
self.assertEquals(len(sr), 1)
self.assertEquals(sr[0]['episodename'], u'My First Day')
|
'Check the firstaired value is retrieved'
| def test_episode_data(self):
| self.assertEquals(self.t['lost']['firstaired'], '2004-09-22')
|
'Check repr() of Show'
| def test_repr_show(self):
| self.assertEquals(repr(self.t['CNNNN']), '<Show Chaser Non-Stop News Network (CNNNN) (containing 3 seasons)>')
|
'Check repr() of Season'
| def test_repr_season(self):
| self.assertEquals(repr(self.t['CNNNN'][1]), '<Season instance (containing 9 episodes)>')
|
'Check repr() of Episode'
| def test_repr_episode(self):
| self.assertEquals(repr(self.t['CNNNN'][1][1]), '<Episode 01x01 - Terror Alert>')
|
'Check valid_languages is up-to-date (compared to languages.xml)'
| def test_have_all_languages(self):
| et = self.t._getetsrc(('http://thetvdb.com/api/%s/languages.xml' % self.t.config['apikey']))
languages = [x.find('abbreviation').text for x in et.findall('Language')]
self.assertEquals(sorted(languages), sorted(self.t.config['valid_languages']))
|
'Check episode data is in French (language="fr")'
| def test_episode_name_french(self):
| t = tvdb_api.Tvdb(cache=True, language='fr')
self.assertEquals(t['scrubs'][1][1]['episodename'], 'Mon premier jour')
self.assertTrue(t['scrubs']['overview'].startswith(u'J.D. est un jeune m\xe9decin qui d\xe9bute'))
|
'Check episode data is in Spanish (language="es")'
| def test_episode_name_spanish(self):
| t = tvdb_api.Tvdb(cache=True, language='es')
self.assertEquals(t['scrubs'][1][1]['episodename'], 'Mi Primer Dia')
self.assertTrue(t['scrubs']['overview'].startswith(u'Scrubs es una divertida comedia'))
|
'Check selected language is used'
| def test_multilanguage_selection(self):
| class SelectEnglishUI(tvdb_ui.BaseUI, ):
def selectSeries(self, allSeries):
return [x for x in allSeries if (x['language'] == 'en')][0]
class SelectItalianUI(tvdb_ui.BaseUI, ):
def selectSeries(self, allSeries):
return [x for x in allSeries if (x['language'] == 'it')][0]
t_en = tvdb_api.Tvdb(cache=True, custom_ui=SelectEnglishUI, language='en')
t_it = tvdb_api.Tvdb(cache=True, custom_ui=SelectItalianUI, language='it')
self.assertEquals(t_en['dexter'][1][2]['episodename'], 'Crocodile')
self.assertEquals(t_it['dexter'][1][2]['episodename'], 'Lacrime di coccodrillo')
|
'Check searching for show with language=zh returns Chinese seriesname'
| def test_search_in_chinese(self):
| t = tvdb_api.Tvdb(cache=True, language='zh')
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
self.assertEquals(type(show), tvdb_api.Show)
self.assertEquals(show['seriesname'], u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i')
|
'Check search_all_languages returns Chinese show, with language=en'
| def test_search_in_all_languages(self):
| t = tvdb_api.Tvdb(cache=True, search_all_languages=True, language='en')
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
self.assertEquals(type(show), tvdb_api.Show)
self.assertEquals(show['seriesname'], u'Virtues Of Harmony II')
|
'Check at least one banner is found'
| def test_have_banners(self):
| self.assertEquals((len(self.t['scrubs']['_banners']) > 0), True)
|
'Checks banner URLs start with http://'
| def test_banner_url(self):
| for (banner_type, banner_data) in self.t['scrubs']['_banners'].items():
for (res, res_data) in banner_data.items():
for (bid, banner_info) in res_data.items():
self.assertEquals(banner_info['_bannerpath'].startswith('http://'), True)
|
'Checks episode \'filename\' image is fully qualified URL'
| def test_episode_image(self):
| self.assertEquals(self.t['scrubs'][1][1]['filename'].startswith('http://'), True)
|
'Checks various image URLs within season data are fully qualified'
| def test_show_artwork(self):
| for key in ['banner', 'fanart', 'poster']:
self.assertEquals(self.t['scrubs'][key].startswith('http://'), True)
|
'Check show/_actors key exists and is correct type'
| def test_actors_is_correct_datatype(self):
| self.assertTrue(isinstance(self.t['scrubs']['_actors'], tvdb_api.Actors))
|
'Check show has at least one Actor'
| def test_actors_has_actor(self):
| self.assertTrue(isinstance(self.t['scrubs']['_actors'][0], tvdb_api.Actor))
|
'Check first actor has a name'
| def test_actor_has_name(self):
| self.assertEquals(self.t['scrubs']['_actors'][0]['name'], 'Zach Braff')
|
'Check image URL is fully qualified'
| def test_actor_image_corrected(self):
| for actor in self.t['scrubs']['_actors']:
if (actor['image'] is not None):
self.assertTrue(actor['image'].startswith('http://'))
|
'Check docstring examples work'
| def test_doctest(self):
| import doctest
doctest.testmod(tvdb_api)
|
'Tests setting cache to True/False/string
Basic tests, only checking for errors'
| def test_true_false_string(self):
| tvdb_api.Tvdb(cache=True)
tvdb_api.Tvdb(cache=False)
tvdb_api.Tvdb(cache='/tmp')
|
'Tests setting cache to invalid value'
| def test_invalid_cache_option(self):
| try:
tvdb_api.Tvdb(cache=2.3)
except ValueError:
pass
else:
self.fail('Expected ValueError from setting cache to float')
|
'Check a show can be looked up by its numeric TVDB ID'
| def test_actors_is_correct_datatype(self):
| self.assertEquals(self.t[76156]['seriesname'], 'Scrubs')
|
'Checks series data is retrieved correctly via the zip feed'
| def test_get_series_from_zip(self):
| self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
|
'Checks shownames with spaces'
| def test_spaces_from_zip(self):
| self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
|
'Checks aired-order and DVD-order episode data'
| def test_ordering(self):
| self.assertEquals(u'The Train Job', self.t_air['Firefly'][1][1]['episodename'])
self.assertEquals(u'Serenity', self.t_dvd['Firefly'][1][1]['episodename'])
self.assertEquals(u'The Cat & the Claw (Part 1)', self.t_air['Batman The Animated Series'][1][1]['episodename'])
self.assertEquals(u'On Leather Wings', self.t_dvd['Batman The Animated Series'][1][1]['episodename'])
|