Dataset columns (name, type, value range):

repository_name             stringlengths   5 .. 67
func_path_in_repository     stringlengths   4 .. 234
func_name                   stringlengths   0 .. 314
whole_func_string           stringlengths   52 .. 3.87M
language                    stringclasses   6 values
func_code_string            stringlengths   52 .. 3.87M
func_documentation_string   stringlengths   1 .. 47.2k
func_code_url               stringlengths   85 .. 339
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedset.py
SortedSet.add
python
def add(self, value):
    """Add the element *value* to the set."""
    if value not in self._set:
        self._set.add(value)
        self._list.add(value)
Add the element *value* to the set.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedset.py#L144-L148
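A minimal usage sketch for SortedSet.add, assuming the sortedcontainers package (which the vendored file above mirrors) is installed:

from sortedcontainers import SortedSet  # assumption: pip package available

s = SortedSet([3, 1, 2])
s.add(2)        # already present: the membership test filters the duplicate
s.add(0)        # new value: added to both the backing set and the sorted list
print(list(s))  # [0, 1, 2, 3] -- iteration follows the sorted list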
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedset.py
SortedSet.copy
python
def copy(self):
    """Create a shallow copy of the sorted set."""
    return self.__class__(key=self._key, load=self._load, _set=set(self._set))
Create a shallow copy of the sorted set.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedset.py#L155-L157
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedset.py
SortedSet.difference
python
def difference(self, *iterables):
    """
    Return a new set with elements in the set that are not in the
    *iterables*.
    """
    diff = self._set.difference(*iterables)
    new_set = self.__class__(key=self._key, load=self._load, _set=diff)
    return new_set
Return a new set with elements in the set that are not in the *iterables*.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedset.py#L192-L199
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedset.py
SortedSet.intersection
python
def intersection(self, *iterables):
    """
    Return a new set with elements common to the set and all *iterables*.
    """
    comb = self._set.intersection(*iterables)
    new_set = self.__class__(key=self._key, load=self._load, _set=comb)
    return new_set
Return a new set with elements common to the set and all *iterables*.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedset.py#L222-L228
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedset.py
SortedSet.symmetric_difference
python
def symmetric_difference(self, that):
    """
    Return a new set with elements in either *self* or *that* but not both.
    """
    diff = self._set.symmetric_difference(that)
    new_set = self.__class__(key=self._key, load=self._load, _set=diff)
    return new_set
Return a new set with elements in either *self* or *that* but not both.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedset.py#L244-L250
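The three set-algebra methods above (difference, intersection, symmetric_difference) all follow the same pattern: delegate to the backing set, then rebuild a sorted set from the result. A usage sketch under the same sortedcontainers assumption:

from sortedcontainers import SortedSet  # assumption: pip package available

a = SortedSet('abcd')
b = SortedSet('cdef')
print(list(a.difference(b)))            # ['a', 'b']
print(list(a.intersection(b)))          # ['c', 'd']
print(list(a.symmetric_difference(b)))  # ['a', 'b', 'e', 'f']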
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
cookie_decode
python
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None'''
    if isinstance(data, unicode): data = data.encode('ascii') #2to3 hack
    if cookie_is_encoded(data):
        sig, msg = data.split(u'?'.encode('ascii'), 1) #2to3 hack
        if sig[1:] == base64.b64encode(hmac.new(key, msg).digest()):
            return pickle.loads(base64.b64decode(msg))
    return None
Verify and decode an encoded string. Return an object or None
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L1013-L1020
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
cookie_is_encoded
python
def cookie_is_encoded(data):
    ''' Verify and decode an encoded string. Return an object or None'''
    return bool(data.startswith(u'!'.encode('ascii')) and u'?'.encode('ascii') in data)
Verify and decode an encoded string. Return an object or None
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L1023-L1025
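cookie_decode and cookie_is_encoded together define a signed-cookie wire format: b'!' + base64(HMAC digest of the payload) + b'?' + base64(pickled object). The matching cookie_encode is not shown in this excerpt, so the sketch below is a Python 3 reconstruction inferred from what cookie_decode verifies (the old hmac default of MD5 is made explicit, since Python 3.8+ requires a digestmod):

import base64, hmac, pickle

def cookie_encode_sketch(obj, key):
    # hypothetical counterpart of bottle2's cookie_encode, reconstructed
    msg = base64.b64encode(pickle.dumps(obj))
    sig = base64.b64encode(hmac.new(key, msg, 'md5').digest())
    return b'!' + sig + b'?' + msg

def cookie_decode_sketch(data, key):
    if data.startswith(b'!') and b'?' in data:  # mirrors cookie_is_encoded
        sig, msg = data.split(b'?', 1)
        expected = base64.b64encode(hmac.new(key, msg, 'md5').digest())
        if hmac.compare_digest(sig[1:], expected):  # constant-time, unlike the original ==
            return pickle.loads(base64.b64decode(msg))
    return None

token = cookie_encode_sketch({'user': 'alice'}, b'secret')
assert cookie_decode_sketch(token, b'secret') == {'user': 'alice'}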
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
tonativefunc
python
def tonativefunc(enc='utf-8'):
    ''' Returns a function that turns everything into 'native' strings using enc '''
    if sys.version_info >= (3,0,0):
        return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
    return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x)
Returns a function that turns everything into 'native' strings using enc
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L1028-L1032
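The Python 3 branch of tonativefunc, shown standalone: bytes are decoded with the given encoding, everything else is passed through str.

def tonative_sketch(x, enc='utf-8'):
    # equivalent to tonativefunc(enc)(x) on Python 3
    return x.decode(enc) if isinstance(x, bytes) else str(x)

print(tonative_sketch(b'caf\xc3\xa9'))  # café
print(tonative_sketch(42))              # 42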
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Router.add
python
def add(self, *a, **ka):
    """ Adds a route->target pair or a Route object to the Router.
        See Route() for details.
    """
    route = a[0] if a and isinstance(a[0], Route) else Route(*a, **ka)
    self.routes.append(route)
    if route.name:
        self.named[route.name] = route.format_str()
    if route.static:
        self.static[route.route] = route.target
        return
    gpatt = route.group_re()
    fpatt = route.flat_re()
    try:
        gregexp = re.compile('^(%s)$' % gpatt) if '(?P' in gpatt else None
        combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, fpatt)
        self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
        self.dynamic[-1][1].append((route.target, gregexp))
    except (AssertionError, IndexError), e: # AssertionError: Too many groups
        self.dynamic.append((re.compile('(^%s$)'%fpatt),
                             [(route.target, gregexp)]))
    except re.error, e:
        raise RouteSyntaxError("Could not add Route: %s (%s)" % (route, e))
Adds a route->target pair or a Route object to the Router. See Route() for details.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L285-L306
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Bottle.mount
python
def mount(self, app, script_path):
    ''' Mount a Bottle application to a specific URL prefix '''
    if not isinstance(app, Bottle):
        raise TypeError('Only Bottle instances are supported for now.')
    script_path = '/'.join(filter(None, script_path.split('/')))
    path_depth = script_path.count('/') + 1
    if not script_path:
        raise TypeError('Empty script_path. Perhaps you want a merge()?')
    for other in self.mounts:
        if other.startswith(script_path):
            raise TypeError('Conflict with existing mount: %s' % other)
    @self.route('/%s/:#.*#' % script_path, method="ANY")
    def mountpoint():
        request.path_shift(path_depth)
        return app.handle(request.path, request.method)
    self.mounts[script_path] = app
Mount a Bottle application to a specific URL prefix
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L354-L369
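A hypothetical usage sketch for Bottle.mount, assuming this vendored bottle2 module is importable and its route decorator accepts the syntax seen in the code above:

from bottle2 import Bottle  # assumption: the vendored module is on sys.path

main, admin = Bottle(), Bottle()

@admin.route('/status')
def status():
    return 'admin ok'

main.mount(admin, '/admin')  # requests to /admin/status now reach the sub-app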
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Bottle.handle
python
def handle(self, url, method):
    """ Execute the handler bound to the specified url and method and return
    its output. If catchall is true, exceptions are catched and returned as
    HTTPError(500) objects. """
    if not self.serve:
        return HTTPError(503, "Server stopped")

    handler, args = self.match_url(url, method)
    if not handler:
        return HTTPError(404, "Not found:" + url)

    try:
        return handler(**args)
    except HTTPResponse, e:
        return e
    except Exception, e:
        if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
        or not self.catchall:
            raise
        return HTTPError(500, 'Unhandled exception', e, format_exc(10))
Execute the handler bound to the specified url and method and return its output. If catchall is true, exceptions are catched and returned as HTTPError(500) objects.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L429-L448
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Bottle._cast
python
def _cast(self, out, request, response, peek=None):
    """ Try to convert the parameter into something WSGI compatible and set
    correct HTTP headers when possible.
    Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
    iterable of strings and iterable of unicodes
    """
    # Filtered types (recursive, because they may return anything)
    for testtype, filterfunc in self.castfilter:
        if isinstance(out, testtype):
            return self._cast(filterfunc(out), request, response)

    # Empty output is done here
    if not out:
        response.headers['Content-Length'] = 0
        return []
    # Join lists of byte or unicode strings. Mixed lists are NOT supported
    if isinstance(out, list) and isinstance(out[0], (StringType, unicode)):
        out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
    # Encode unicode strings
    if isinstance(out, unicode):
        out = out.encode(response.charset)
    # Byte Strings are just returned
    if isinstance(out, StringType):
        response.headers['Content-Length'] = str(len(out))
        return [out]
    # HTTPError or HTTPException (recursive, because they may wrap anything)
    if isinstance(out, HTTPError):
        out.apply(response)
        return self._cast(self.error_handler.get(out.status, repr)(out), request, response)
    if isinstance(out, HTTPResponse):
        out.apply(response)
        return self._cast(out.output, request, response)

    # Cast Files into iterables
    if hasattr(out, 'read') and 'wsgi.file_wrapper' in request.environ:
        out = request.environ.get('wsgi.file_wrapper',
              lambda x, y: iter(lambda: x.read(y), ''))(out, 1024*64)

    # Handle Iterables. We peek into them to detect their inner type.
    try:
        out = iter(out)
        first = out.next()
        while not first:
            first = out.next()
    except StopIteration:
        return self._cast('', request, response)
    except HTTPResponse, e:
        first = e
    except Exception, e:
        first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
        if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
        or not self.catchall:
            raise
    # These are the inner types allowed in iterator or generator objects.
    if isinstance(first, HTTPResponse):
        return self._cast(first, request, response)
    if isinstance(first, StringType):
        return itertools.chain([first], out)
    if isinstance(first, unicode):
        return itertools.imap(lambda x: x.encode(response.charset),
                              itertools.chain([first], out))
    return self._cast(HTTPError(500, 'Unsupported response type: %s'\
                                % type(first)), request, response)
Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L450-L512
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Request.header
python
def header(self):
    ''' :class:`HeaderDict` filled with request headers.

        HeaderDict keys are case insensitive str.title()d
    '''
    if self._header is None:
        self._header = HeaderDict()
        for key, value in self.environ.iteritems():
            if key.startswith('HTTP_'):
                key = key[5:].replace('_','-').title()
                self._header[key] = value
    return self._header
:class:`HeaderDict` filled with request headers. HeaderDict keys are case insensitive str.title()d
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L647-L658
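The HTTP_* rewrite that the header property applies, shown in isolation:

key = 'HTTP_USER_AGENT'
print(key[5:].replace('_', '-').title())  # User-Agent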
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Request.GET
python
def GET(self):
    """ The QUERY_STRING parsed into a MultiDict.

        Keys and values are strings. Multiple values per key are possible.
        See MultiDict for details.
    """
    if self._GET is None:
        data = parse_qs(self.query_string, keep_blank_values=True)
        self._GET = MultiDict()
        for key, values in data.iteritems():
            for value in values:
                self._GET[key] = value
    return self._GET
The QUERY_STRING parsed into a MultiDict. Keys and values are strings. Multiple values per key are possible. See MultiDict for details.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L661-L673
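What the GET property builds on: the standard-library query-string parser with keep_blank_values=True, whose output feeds the MultiDict. A standalone look at the raw parse (urllib.parse is the Python 3 home of parse_qs):

from urllib.parse import parse_qs

print(parse_qs('a=1&a=2&b=', keep_blank_values=True))
# {'a': ['1', '2'], 'b': ['']}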
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Request.COOKIES
python
def COOKIES(self):
    """ Cookie information parsed into a dictionary.

        Secure cookies are NOT decoded automatically. See
        Request.get_cookie() for details.
    """
    if self._COOKIES is None:
        raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
        self._COOKIES = {}
        for cookie in raw_dict.itervalues():
            self._COOKIES[cookie.key] = cookie.value
    return self._COOKIES
Cookie information parsed into a dictionary. Secure cookies are NOT decoded automatically. See Request.get_cookie() for details.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L740-L751
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
Response.set_cookie
python
def set_cookie(self, key, value, **kargs):
    """ Add a new cookie with various options.

        If the cookie value is not a string, a secure cookie is created.

        Possible options are:
        expires, path, comment, domain, max_age, secure, version, httponly
        See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
    """
    if not isinstance(value, basestring):
        sec = self.app.config['securecookie.key']
        value = cookie_encode(value, sec).decode('ascii') #2to3 hack
    self.COOKIES[key] = value
    for k, v in kargs.iteritems():
        self.COOKIES[key][k.replace('_', '-')] = v
Add a new cookie with various options. If the cookie value is not a string, a secure cookie is created. Possible options are: expires, path, comment, domain, max_age, secure, version, httponly See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L809-L823
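The underscore-to-dash keyword rewrite in set_cookie maps Python-friendly option names onto standard cookie attributes. A standalone Python 3 illustration with http.cookies:

from http.cookies import SimpleCookie

jar = SimpleCookie()
jar['session'] = 'abc123'
for k, v in {'max_age': 3600, 'path': '/'}.items():
    jar['session'][k.replace('_', '-')] = v  # same rewrite set_cookie applies
print(jar.output())  # Set-Cookie: session=abc123; Path=/; Max-Age=3600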
lrq3000/pyFileFixity
pyFileFixity/filetamper.py
tamper_file_at
python
def tamper_file_at(path, pos=0, replace_str=None):
    """ Tamper a file at the given position and using the given string """
    if not replace_str:
        replace_str = "\x00"
    try:
        with open(path, "r+b") as fh:
            if pos < 0: # if negative, we calculate the position backward from the end of file
                fsize = os.fstat(fh.fileno()).st_size
                pos = fsize + pos
            fh.seek(pos)
            fh.write(replace_str)
    except IOError:
        return False
    finally:
        try:
            fh.close()
        except Exception:
            pass
    return True
Tamper a file at the given position and using the given string
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/filetamper.py#L58-L76
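A usage sketch for tamper_file_at, assuming the function is importable from pyFileFixity.filetamper and called with a bytes replacement under Python 3 (the file is opened in binary mode):

from pyFileFixity.filetamper import tamper_file_at  # assumption: package importable

with open('victim.bin', 'wb') as f:
    f.write(b'hello world')

tamper_file_at('victim.bin', pos=-5, replace_str=b'XXXXX')  # negative pos counts from EOF
with open('victim.bin', 'rb') as f:
    print(f.read())  # b'hello XXXXX'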
lrq3000/pyFileFixity
pyFileFixity/filetamper.py
tamper_file
python
def tamper_file(filepath, mode='e', proba=0.03, block_proba=None, blocksize=65535, burst_length=None, header=None):
    """ Randomly tamper a file's content """
    if header and header > 0:
        blocksize = header
    tamper_count = 0 # total number of characters tampered in the file
    total_size = 0 # total buffer size, NOT necessarily the total file size (depends if you set header or not)
    with open(filepath, "r+b") as fh: # 'r+' allows to read AND overwrite characters. Else any other option won't allow both ('a+' read and append, 'w+' erases the file first then allow to read and write), and 'b' is just for binary because we can open any filetype.
        if proba >= 1:
            proba = 1.0/os.fstat(fh.fileno()).st_size * proba # normalizing probability if it's an integer (ie: the number of characters to flip on average)
        buf = fh.read(blocksize) # We process blocks by blocks because it's a lot faster (IO is still the slowest operation in any computing system)
        while len(buf) > 0:
            total_size += len(buf)
            if not block_proba or (random.random() < block_proba): # If block tampering is enabled, process only if this block is selected by probability
                pos2tamper = []
                burst_remain = 0 # if burst is enabled and corruption probability is triggered, then we will here store the remaining number of characters to corrupt (the length is uniformly sampled over the range specified in arguments)
                # Create the list of bits to tamper (it's a lot more efficient to precompute the list of characters to corrupt, and then modify in the file the characters all at once)
                for i in xrange(len(buf)):
                    if burst_remain > 0 or (random.random() < proba): # Corruption probability: corrupt only if below the bit-flip proba
                        pos2tamper.append(i) # keep this character's position in the to-be-corrupted list
                        if burst_remain > 0: # if we're already in a burst, we minus one and continue onto the next character
                            burst_remain -= 1
                        elif burst_length: # else we're not in a burst, we create one (triggered by corruption probability: as soon as one character triggers the corruption probability, then we do a burst)
                            burst_remain = random.randint(burst_length[0], burst_length[1]) - 1 # if burst is enabled, then we randomly (uniformly) pick a random length for the burst between the range specified, and since we already tampered one character, we minus 1
                # If there's any character to tamper in the list, we tamper the string
                if pos2tamper:
                    tamper_count = tamper_count + len(pos2tamper)
                    #print("Before: %s" % buf)
                    buf = bytearray(buf) # Strings in Python are immutable, thus we need to convert to a bytearray
                    for pos in pos2tamper:
                        if mode == 'e' or mode == 'erasure': # Erase the character (set a null byte)
                            buf[pos] = 0
                        elif mode == 'n' or mode == 'noise': # Noising the character (set a random ASCII character)
                            buf[pos] = random.randint(0,255)
                    #print("After: %s" % buf)
                    # Overwriting the string into the file
                    prevpos = fh.tell() # need to store and place back the seek cursor because after the write, if it's the end of the file, the next read may be buggy (getting characters that are not part of the file)
                    fh.seek(fh.tell()-len(buf)) # Move the cursor at the beginning of the string we just read
                    fh.write(buf) # Overwrite it
                    fh.seek(prevpos) # Restore the previous position after the string
            # If we only tamper the header, we stop here by setting the buffer to an empty string
            if header and header > 0:
                buf = ''
            # Else we continue to the next data block
            else:
                # Load the next characters from file
                buf = fh.read(blocksize)
    return [tamper_count, total_size]
Randomly tamper a file's content
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/filetamper.py#L78-L124
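The proba >= 1 branch deserves a note: an integer proba is read as the expected number of corrupted characters rather than a probability, and is normalized against the file size. Worked out:

fsize = 1000                 # file size in bytes
proba = 30                   # "corrupt about 30 characters"
proba = 1.0 / fsize * proba  # same normalization tamper_file applies
print(proba)                 # 0.03 per-character corruption probability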
lrq3000/pyFileFixity
pyFileFixity/filetamper.py
tamper_dir
python
def tamper_dir(inputpath, *args, **kwargs):
    """ Randomly tamper the files content in a directory tree, recursively """
    silent = kwargs.get('silent', False)
    if 'silent' in kwargs:
        del kwargs['silent']
    filescount = 0
    for _ in tqdm(recwalk(inputpath), desc='Precomputing', disable=silent):
        filescount += 1
    files_tampered = 0
    tamper_count = 0
    total_size = 0
    for dirname, filepath in tqdm(recwalk(inputpath), total=filescount, leave=True, desc='Tamper file n.', disable=silent):
        tcount, tsize = tamper_file(os.path.join(dirname, filepath), *args, **kwargs)
        if tcount > 0:
            tamper_count += tcount
            files_tampered += 1
        total_size += tsize
    return [files_tampered, filescount, tamper_count, total_size]
Randomly tamper the files content in a directory tree, recursively
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/filetamper.py#L126-L144
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
TrackedObject._save_trace
python
def _save_trace(self):
    """
    Save current stack trace as formatted string.
    """
    stack_trace = stack()
    try:
        self.trace = []
        for frm in stack_trace[5:]: # eliminate our own overhead
            self.trace.insert(0, frm[1:])
    finally:
        del stack_trace
Save current stack trace as formatted string.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L110-L120
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
TrackedObject.track_size
python
def track_size(self, ts, sizer):
    """
    Store timestamp and current size for later evaluation.
    The 'sizer' is a stateful sizing facility that excludes other tracked
    objects.
    """
    obj = self.ref()
    self.snapshots.append(
        (ts, sizer.asized(obj, detail=self._resolution_level))
    )
    if obj is not None:
        self.repr = safe_repr(obj, clip=128)
Store timestamp and current size for later evaluation. The 'sizer' is a stateful sizing facility that excludes other tracked objects.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L122-L133
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
TrackedObject.get_size_at_time
python
def get_size_at_time(self, timestamp):
    """
    Get the size of the object at a specific time (snapshot).
    If the object was not alive/sized at that instant, return 0.
    """
    size = 0
    for (t, s) in self.snapshots:
        if t == timestamp:
            size = s.size
    return size
Get the size of the object at a specific time (snapshot). If the object was not alive/sized at that instant, return 0.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L141-L150
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
PeriodicThread.run
python
def run(self):
    """
    Loop until a stop signal is set.
    """
    self.stop = False
    while not self.stop:
        self.tracker.create_snapshot()
        sleep(self.interval)
Loop until a stop signal is set.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L193-L200
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
Snapshot.total
python
def total(self):
    """
    Return the total (virtual) size of the process in bytes.
    If process information is not available, get the best number available,
    even if it is a poor approximation of reality.
    """
    if self.system_total.available:
        return self.system_total.vsz
    elif self.asizeof_total: # pragma: no cover
        return self.asizeof_total
    else: # pragma: no cover
        return self.tracked_total
Return the total (virtual) size of the process in bytes. If process information is not available, get the best number available, even if it is a poor approximation of reality.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L217-L228
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
Snapshot.label
python
def label(self):
    """Return timestamped label for this snapshot, or a raw timestamp."""
    if not self.desc:
        return "%.3fs" % self.timestamp
    return "%s (%.3fs)" % (self.desc, self.timestamp)
Return timestamped label for this snapshot, or a raw timestamp.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L232-L236
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker._tracker
python
def _tracker(self, _observer_, _self_, *args, **kwds):
    """
    Injected constructor for tracked classes.
    Call the actual constructor of the object and track the object.
    Attach to the object before calling the constructor to track the object
    with the parameters of the most specialized class.
    """
    self.track_object(_self_,
                      name=_observer_.name,
                      resolution_level=_observer_.detail,
                      keep=_observer_.keep,
                      trace=_observer_.trace)
    _observer_.init(_self_, *args, **kwds)
Injected constructor for tracked classes. Call the actual constructor of the object and track the object. Attach to the object before calling the constructor to track the object with the parameters of the most specialized class.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L283-L295
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker._inject_constructor
python
def _inject_constructor(self, cls, func, name, resolution_level, keep, trace):
    """
    Modifying Methods in Place - after the recipe 15.7 in the Python
    Cookbook by Ken Seehof. The original constructors may be restored
    later.
    """
    try:
        constructor = cls.__init__
    except AttributeError:
        def constructor(self, *_args, **_kwargs):
            pass

    # Possible name clash between keyword arguments of the tracked class'
    # constructor and the curried arguments of the injected constructor.
    # Therefore, the additional argument has a 'magic' name to make it less
    # likely that an argument name clash occurs.
    self._observers[cls] = _ClassObserver(constructor,
                                          name,
                                          resolution_level,
                                          keep,
                                          trace)
    cls.__init__ = instancemethod(
        lambda *args, **kwds: func(self._observers[cls], *args, **kwds),
        None,
        cls
    )
Modifying Methods in Place - after the recipe 15.7 in the Python Cookbook by Ken Seehof. The original constructors may be restored later.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L298-L324
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker._track_modify
python
def _track_modify(self, cls, name, detail, keep, trace):
    """
    Modify settings of a tracked class
    """
    self._observers[cls].modify(name, detail, keep, trace)
Modify settings of a tracked class
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L334-L338
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker._restore_constructor
python
def _restore_constructor(self, cls):
    """
    Restore the original constructor, lose track of class.
    """
    cls.__init__ = self._observers[cls].init
    del self._observers[cls]
Restore the original constructor, lose track of class.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L341-L346
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.track_change
python
def track_change(self, instance, resolution_level=0):
    """
    Change tracking options for the already tracked object 'instance'.
    If instance is not tracked, a KeyError will be raised.
    """
    tobj = self.objects[id(instance)]
    tobj.set_resolution_level(resolution_level)
Change tracking options for the already tracked object 'instance'. If instance is not tracked, a KeyError will be raised.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L349-L355
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.track_object
python
def track_object(self, instance, name=None, resolution_level=0, keep=False, trace=False):
    """
    Track object 'instance' and sample size and lifetime information.
    Not all objects can be tracked; trackable objects are class instances
    and other objects that can be weakly referenced. When an object cannot
    be tracked, a `TypeError` is raised.

    :param resolution_level: The recursion depth up to which referents are
        sized individually. Resolution level 0 (default) treats the object
        as an opaque entity, 1 sizes all direct referents individually, 2
        also sizes the referents of the referents and so forth.
    :param keep: Prevent the object's deletion by keeping a (strong)
        reference to the object.
    """
    # Check if object is already tracked. This happens if track_object is
    # called multiple times for the same object or if an object inherits
    # from multiple tracked classes. In the latter case, the most
    # specialized class wins. To detect id recycling, the weak reference
    # is checked. If it is 'None' a tracked object is dead and another one
    # takes the same 'id'.
    if id(instance) in self.objects and \
       self.objects[id(instance)].ref() is not None:
        return

    tobj = TrackedObject(instance, resolution_level=resolution_level, trace=trace)

    if name is None:
        name = instance.__class__.__name__
    if not name in self.index:
        self.index[name] = []
    self.index[name].append(tobj)
    self.objects[id(instance)] = tobj

    if keep:
        self._keepalive.append(instance)
Track object 'instance' and sample size and lifetime information. Not all objects can be tracked; trackable objects are class instances and other objects that can be weakly referenced. When an object cannot be tracked, a `TypeError` is raised. :param resolution_level: The recursion depth up to which referents are sized individually. Resolution level 0 (default) treats the object as an opaque entity, 1 sizes all direct referents individually, 2 also sizes the referents of the referents and so forth. :param keep: Prevent the object's deletion by keeping a (strong) reference to the object.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L358-L393
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.track_class
python
def track_class(self, cls, name=None, resolution_level=0, keep=False, trace=False):
    """
    Track all objects of the class `cls`. Objects of that type that already
    exist are *not* tracked. If `track_class` is called for a class already
    tracked, the tracking parameters are modified. Instantiation traces can
    be generated by setting `trace` to True.
    A constructor is injected to begin instance tracking on creation of the
    object. The constructor calls `track_object` internally.

    :param cls: class to be tracked, may be an old-style or a new-style class
    :param name: reference the class by a name, default is the concatenation
        of module and class name
    :param resolution_level: The recursion depth up to which referents are
        sized individually. Resolution level 0 (default) treats the object
        as an opaque entity, 1 sizes all direct referents individually, 2
        also sizes the referents of the referents and so forth.
    :param keep: Prevent the object's deletion by keeping a (strong)
        reference to the object.
    :param trace: Save instantiation stack trace for each instance
    """
    if not isclass(cls):
        raise TypeError("only class objects can be tracked")
    if name is None:
        name = cls.__module__ + '.' + cls.__name__
    if self._is_tracked(cls):
        self._track_modify(cls, name, resolution_level, keep, trace)
    else:
        self._inject_constructor(cls, self._tracker, name, resolution_level, keep, trace)
Track all objects of the class `cls`. Objects of that type that already exist are *not* tracked. If `track_class` is called for a class already tracked, the tracking parameters are modified. Instantiation traces can be generated by setting `trace` to True. A constructor is injected to begin instance tracking on creation of the object. The constructor calls `track_object` internally. :param cls: class to be tracked, may be an old-style or a new-style class :param name: reference the class by a name, default is the concatenation of module and class name :param resolution_level: The recursion depth up to which referents are sized individually. Resolution level 0 (default) treats the object as an opaque entity, 1 sizes all direct referents individually, 2 also sizes the referents of the referents and so forth. :param keep: Prevent the object's deletion by keeping a (strong) reference to the object. :param trace: Save instantiation stack trace for each instance
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L396-L423
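A hedged end-to-end sketch of the ClassTracker API shown above (track_class plus create_snapshot), assuming the pympler package, which this vendored copy mirrors, is installed:

from pympler.classtracker import ClassTracker  # assumption: pip package available

class Document:
    def __init__(self):
        self.pages = []

tracker = ClassTracker()
tracker.track_class(Document, resolution_level=1)  # size direct referents too
docs = [Document() for _ in range(100)]
tracker.create_snapshot('100 docs')
tracker.stats.print_summary()  # per-class sizes at each snapshot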
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.detach_all_classes
python
def detach_all_classes(self):
    """
    Detach from all tracked classes.
    """
    classes = list(self._observers.keys())
    for cls in classes:
        self.detach_class(cls)
Detach from all tracked classes.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L434-L440
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.detach_all
python
def detach_all(self):
    """
    Detach from all tracked classes and objects.
    Restore the original constructors and cleanse the tracking lists.
    """
    self.detach_all_classes()
    self.objects.clear()
    self.index.clear()
    self._keepalive[:] = []
Detach from all tracked classes and objects. Restore the original constructors and cleanse the tracking lists.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L443-L451
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.start_periodic_snapshots
python
def start_periodic_snapshots(self, interval=1.0):
    """
    Start a thread which takes snapshots periodically. The `interval`
    specifies the time in seconds the thread waits between taking snapshots.
    The thread is started as a daemon allowing the program to exit. If
    periodic snapshots are already active, the interval is updated.
    """
    if not self._periodic_thread:
        self._periodic_thread = PeriodicThread(self, interval, name='BackgroundMonitor')
        self._periodic_thread.setDaemon(True)
        self._periodic_thread.start()
    else:
        self._periodic_thread.interval = interval
Start a thread which takes snapshots periodically. The `interval` specifies the time in seconds the thread waits between taking snapshots. The thread is started as a daemon allowing the program to exit. If periodic snapshots are already active, the interval is updated.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L465-L477
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.stop_periodic_snapshots
python
def stop_periodic_snapshots(self):
    """
    Post a stop signal to the thread that takes the periodic snapshots. The
    function waits for the thread to terminate which can take some time
    depending on the configured interval.
    """
    if self._periodic_thread and self._periodic_thread.isAlive():
        self._periodic_thread.stop = True
        self._periodic_thread.join()
        self._periodic_thread = None
Post a stop signal to the thread that takes the periodic snapshots. The function waits for the thread to terminate which can take some time depending on the configured interval.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L479-L488
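start_periodic_snapshots and stop_periodic_snapshots wrap the PeriodicThread shown earlier; a small sketch under the same pympler assumption:

import time
from pympler.classtracker import ClassTracker  # assumption: pip package available

tracker = ClassTracker()
tracker.start_periodic_snapshots(interval=0.25)  # daemon thread calls create_snapshot
time.sleep(1.0)                                  # let a few snapshots accumulate
tracker.stop_periodic_snapshots()                # set the stop flag, then join
print(len(tracker.snapshots))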
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.create_snapshot
python
def create_snapshot(self, description='', compute_total=False):
    """
    Collect current per instance statistics and saves total amount of
    memory associated with the Python process.

    If `compute_total` is `True`, the total consumption of all objects
    known to *asizeof* is computed. The latter might be very slow if many
    objects are mapped into memory at the time the snapshot is taken.
    Therefore, `compute_total` is set to `False` by default.

    The overhead of the `ClassTracker` structure is also computed.

    Snapshots can be taken asynchronously. The function is protected with a
    lock to prevent race conditions.
    """
    try:
        # TODO: It is not clear what happens when memory is allocated or
        # released while this function is executed but it will likely lead
        # to inconsistencies. Either pause all other threads or don't size
        # individual objects in asynchronous mode.
        self.snapshot_lock.acquire()

        timestamp = _get_time()
        sizer = asizeof.Asizer()
        objs = [tobj.ref() for tobj in list(self.objects.values())]
        sizer.exclude_refs(*objs)

        # The objects need to be sized in a deterministic order. Sort the
        # objects by its creation date which should at least work for
        # non-parallel execution. The "proper" fix would be to handle
        # shared data separately.
        tracked_objects = list(self.objects.values())
        tracked_objects.sort(key=lambda x: x.birth)
        for tobj in tracked_objects:
            tobj.track_size(timestamp, sizer)

        snapshot = Snapshot()
        snapshot.timestamp = timestamp
        snapshot.tracked_total = sizer.total
        if compute_total:
            snapshot.asizeof_total = asizeof.asizeof(all=True, code=True)
        snapshot.system_total = pympler.process.ProcessMemoryInfo()
        snapshot.desc = str(description)

        # Compute overhead of all structures, use sizer to exclude tracked objects(!)
        snapshot.overhead = 0
        if snapshot.tracked_total:
            snapshot.overhead = sizer.asizeof(self)
        if snapshot.asizeof_total:
            snapshot.asizeof_total -= snapshot.overhead

        self.snapshots.append(snapshot)
    finally:
        self.snapshot_lock.release()
Collect current per-instance statistics and save the total amount of memory associated with the Python process. If `compute_total` is `True`, the total consumption of all objects known to *asizeof* is computed. The latter might be very slow if many objects are mapped into memory at the time the snapshot is taken. Therefore, `compute_total` is set to `False` by default. The overhead of the `ClassTracker` structure is also computed. Snapshots can be taken asynchronously. The function is protected with a lock to prevent race conditions.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L496-L552
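For context, a minimal usage sketch of the snapshot workflow around this method; it assumes pympler's public ClassTracker API, and the Widget class is made up:

from pympler.classtracker import ClassTracker

class Widget(object):
    pass

tracker = ClassTracker()
tracker.track_class(Widget)           # register the class for tracking
widgets = [Widget() for _ in range(100)]
tracker.create_snapshot('after allocation')  # sizes the tracked instances
tracker.stats.print_summary()         # per-snapshot memory report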
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/python_bindings/argparse_to_json.py
is_required
def is_required(action):
    '''_actions which are positional or possess the `required` flag'''
    return (not action.option_strings
            and not isinstance(action, _SubParsersAction)) or action.required == True
python
_actions which are positional or possess the `required` flag
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/python_bindings/argparse_to_json.py#L97-L99
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
entry_fields
def entry_fields(entry, field_delim="\xFF"):
    '''From a raw ecc entry (a string), extract the metadata fields (filename,
    filesize, ecc for both), the rest being blocks of hash and ecc per block
    of the original file's header.'''
    # Strip any leading field delimiter left over from a slight adjustment
    # error (example: if the last ecc block of the previous file ended on a
    # field_delim, the entry would start with a field_delim, which would make
    # the field detection buggy). This is not a big problem for the previous
    # file's ecc block: the ecc characters mistaken for a field_delim are just
    # missing, so we only lose a bit of resiliency for the last block of the
    # previous file; the correction can still rely on the other characters.
    entry = entry.lstrip(field_delim)

    # Find the metadata field delimiter positions.
    # TODO: automate this part: pass the number of field_delims to find as an
    # argument and let the function locate them (the number needs to be
    # specified because a field_delim can wrongly be found inside the ecc
    # stream, which we don't want).
    first = entry.find(field_delim)
    second = entry.find(field_delim, first+len(field_delim))
    third = entry.find(field_delim, second+len(field_delim))
    fourth = entry.find(field_delim, third+len(field_delim))
    # Note: we do not look for all the field delimiters, only the exact number
    # we need, so we never have to walk through the whole string.

    # Extract the content of the fields
    # Metadata fields
    relfilepath = entry[:first]
    filesize = entry[first+len(field_delim):second]
    relfilepath_ecc = entry[second+len(field_delim):third]
    filesize_ecc = entry[third+len(field_delim):fourth]
    # Ecc stream field (aka ecc blocks)
    ecc_field = entry[fourth+len(field_delim):]

    # Try to convert the filesize to an int; it may be corrupted.
    try:
        filesize = int(filesize)
    except Exception, e:
        print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ")
        print(e)
        # Do not reset filesize to 0: keeping the raw value lets us try to fix
        # it later using the intra-ecc.

    return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc,
            "filesize": filesize, "filesize_ecc": filesize_ecc,
            "ecc_field": ecc_field}
python
From a raw ecc entry (a string), extract the metadata fields (filename, filesize, ecc for both), the rest being blocks of hash and ecc per block of the original file's header
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L92-L124
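A small parsing sketch for this entry layout (the entry literal below is made up; byte-string semantics as in the rest of this Python 2 module). Four "\xFF" delimiters separate the metadata fields from the trailing ecc stream:

entry = "dir/file.txt" "\xFF" "1024" "\xFF" "PECC" "\xFF" "SECC" "\xFF" "hash+ecc blocks..."
fields = entry_fields(entry)
print(fields["relfilepath"])   # -> dir/file.txt
print(fields["filesize"])      # -> 1024 (converted to int)
print(fields["ecc_field"])     # -> hash+ecc blocks...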
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
entry_assemble
def entry_assemble(entry_fields, ecc_params, header_size, filepath, fileheader=None):
    '''From an entry with its parameters (filename, filesize), assemble a list
    of each block from the original file along with the relative hash and ecc
    for easy processing later.'''
    # Extract the header from the file
    if fileheader is None:
        # filepath is the absolute path to the original file (the one with
        # maybe corruptions, NOT the output repaired file!)
        with open(filepath, 'rb') as file:
            # Compute the size of the buffer to read: header_size if possible,
            # but if the file is smaller than that, read the whole file.
            if entry_fields["filesize"] > 0 and entry_fields["filesize"] < header_size:
                fileheader = file.read(entry_fields["filesize"])
            else:
                fileheader = file.read(header_size)

    # Cut the header and the ecc entry into blocks, and assemble them so that
    # we can easily process block by block
    entry_asm = []
    for i, j in itertools.izip(xrange(0, len(fileheader), ecc_params["message_size"]),
                               xrange(0, len(entry_fields["ecc_field"]), ecc_params["hash_size"] + ecc_params["ecc_size"])):
        # Extract each field from each block
        mes = fileheader[i:i+ecc_params["message_size"]]
        hash = entry_fields["ecc_field"][j:j+ecc_params["hash_size"]]
        ecc = entry_fields["ecc_field"][j+ecc_params["hash_size"]:j+ecc_params["hash_size"]+ecc_params["ecc_size"]]
        entry_asm.append({"message": mes, "hash": hash, "ecc": ecc})

    # Return a list of fields for each block
    return entry_asm
python
From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L126-L147
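The pairing logic above walks two strides in parallel; a standalone sketch with made-up sizes (Python 2, matching the module's izip/xrange usage). The header advances by message_size while the ecc field advances by hash_size+ecc_size, so header block k lines up with hash+ecc pair k:

import itertools

message_size, hash_size, ecc_size = 10, 4, 6
header = "A" * 25                                      # 3 message blocks (10, 10, 5)
eccfield = ("h" * hash_size + "e" * ecc_size) * 3      # 3 hash+ecc pairs
for i, j in itertools.izip(xrange(0, len(header), message_size),
                           xrange(0, len(eccfield), hash_size + ecc_size)):
    print(header[i:i+message_size],
          eccfield[j:j+hash_size],
          eccfield[j+hash_size:j+hash_size+ecc_size])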
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
compute_ecc_hash
def compute_ecc_hash(ecc_manager, hasher, buf, max_block_size, rate, message_size=None, as_string=False):
    '''Split a string in blocks given max_block_size and compute the hash and
    ecc for each block, then return a list with both for easy processing.'''
    result = []
    # If the required parameters were not provided, compute them
    if not message_size:
        ecc_params = compute_ecc_params(max_block_size, rate, hasher)
        message_size = ecc_params["message_size"]
    # Split the buffer string in blocks (necessary for Reed-Solomon encoding
    # because it is limited to 255 characters max)
    for i in xrange(0, len(buf), message_size):
        # Compute the message block
        mes = buf[i:i+message_size]
        # Compute the ecc
        ecc = ecc_manager.encode(mes)
        # Compute the hash
        hash = hasher.hash(mes)
        #crc = zlib.crc32(mes) # DEPRECATED: CRC is not resilient enough
        # Store the result (either as a string for easy writing into a file,
        # or as a list for easy post-processing)
        if as_string:
            result.append("%s%s" % (str(hash), str(ecc)))
        else:
            result.append([hash, ecc])
    return result
python
Split a string in blocks given max_block_size and compute the hash and ecc for each block, and then return a nice list with both for easy processing.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L149-L172
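The block geometry used here comes from compute_ecc_params (reproduced at the end of this section). A quick worked example of that formula, assuming the Reed-Solomon maximum block size of 255 and a resilience rate of 0.2:

# message_size = round(255 / (1 + 2*0.2)) = 182
# ecc_size     = 255 - 182 = 73, i.e. ecc_size ~= 2 * rate * message_size
max_block_size, rate = 255, 0.2
message_size = int(round(float(max_block_size) / (1 + 2*rate), 0))
ecc_size = max_block_size - message_size
print(message_size, ecc_size)  # -> (182, 73)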
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
ecc_correct_intra
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    """ Correct an intra-field with its corresponding intra-ecc if necessary """
    fentry_fields = {"ecc_field": ecc}
    field_correct = []  # will store each block of the corrected (or already correct) field
    fcorrupted = False  # was the field corrupted?
    fcorrected = True   # was the field corrected (if it was corrupted)?
    errmsg = ''
    # Decode each block of the field
    for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field):
        # If this block of the field is OK, just copy it over
        if ecc_manager_intra.check(e["message"], e["ecc"]):
            field_correct.append(e["message"])
        else:  # else this block is corrupted, try to fix it using the ecc
            fcorrupted = True
            # Repair the message block and the ecc
            try:
                repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures)
            except (ReedSolomonError, RSCodecError), exc:
                # the reedsolo lib may raise an exception when it can't decode;
                # make sure we can still continue to decode the rest of the
                # file, and the other files
                repaired_block = None
                repaired_ecc = None
                errmsg += "- Error: metadata field could not be repaired: %s\n" % exc
            # If the block was successfully repaired, copy the repaired block...
            if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc):
                field_correct.append(repaired_block)
            else:
                # ... else it failed: copy the original corrupted block and
                # report an error later
                field_correct.append(e["message"])
                fcorrected = False
    # Join all the blocks into one string to build the final field
    if isinstance(field_correct[0], bytearray):
        # workaround when using --ecc_algo 3 or 4: we get a list of bytearrays
        # instead of str
        field_correct = [str(x) for x in field_correct]
    field = ''.join(field_correct)
    # Report errors
    return (field, fcorrupted, fcorrected, errmsg)
python
Correct an intra-field with its corresponding intra-ecc if necessary
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L174-L205
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._eliminate_leafs
def _eliminate_leafs(self, graph):
    """
    Eliminate leaf objects - that is, objects not referencing any other
    objects in the list `graph`. Returns the list of objects without the
    objects identified as leafs.
    """
    result = []
    idset = set([id(x) for x in graph])
    for n in graph:
        refset = set([id(x) for x in get_referents(n)])
        if refset.intersection(idset):
            result.append(n)
    return result
python
Eliminate leaf objects - that is, objects not referencing any other objects in the list `graph`. Returns the list of objects without the objects identified as leafs.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L92-L104
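A standalone sketch of the same leaf test using gc.get_referents directly (the helper below mirrors the method's logic; the objects are made up):

from gc import get_referents

def eliminate_leafs(graph):
    idset = set(id(x) for x in graph)
    # keep only objects that reference at least one other object in the list
    return [n for n in graph
            if set(id(x) for x in get_referents(n)) & idset]

a = []
b = [a]        # b -> a
c = {'k': b}   # c -> b
print(eliminate_leafs([a, b, c]))  # a references nothing in the list and is dropped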
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._reduce_to_cycles
def _reduce_to_cycles(self):
    """
    Iteratively eliminate leafs to reduce the set of objects to only those
    that build cycles. Return the number of objects involved in reference
    cycles. If there are no cycles, `self.objects` will be an empty list
    and this method returns 0.
    """
    cycles = self.objects[:]
    cnt = 0
    while cnt != len(cycles):
        cnt = len(cycles)
        cycles = self._eliminate_leafs(cycles)
    self.objects = cycles
    return len(self.objects)
python
Iteratively eliminate leafs to reduce the set of objects to only those that build cycles. Return the number of objects involved in reference cycles. If there are no cycles, `self.objects` will be an empty list and this method returns 0.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L107-L120
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph.reduce_to_cycles
def reduce_to_cycles(self):
    """
    Iteratively eliminate leafs to reduce the set of objects to only those
    that build cycles. Return the reduced graph. If there are no cycles,
    None is returned.
    """
    if not self._reduced:
        reduced = copy(self)
        reduced.objects = self.objects[:]
        reduced.metadata = []
        reduced.edges = []
        self.num_in_cycles = reduced._reduce_to_cycles()
        reduced.num_in_cycles = self.num_in_cycles
        if self.num_in_cycles:
            reduced._get_edges()
            reduced._annotate_objects()
            for meta in reduced.metadata:
                meta.cycle = True
        else:
            reduced = None
        self._reduced = reduced
    return self._reduced
python
Iteratively eliminate leafs to reduce the set of objects to only those that build cycles. Return the reduced graph. If there are no cycles, None is returned.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L123-L144
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._get_edges
def _get_edges(self):
    """
    Compute the edges for the reference graph.
    The function returns a set of tuples (id(a), id(b), ref) if a
    references b with the referent 'ref'.
    """
    idset = set([id(x) for x in self.objects])
    self.edges = set([])
    for n in self.objects:
        refset = set([id(x) for x in get_referents(n)])
        for ref in refset.intersection(idset):
            label = ''
            members = None
            if isinstance(n, dict):
                members = n.items()
            if not members:
                members = named_refs(n)
            for (k, v) in members:
                if id(v) == ref:
                    label = k
                    break
            self.edges.add(_Edge(id(n), ref, label))
python
Compute the edges for the reference graph. The function returns a set of tuples (id(a), id(b), ref) if a references b with the referent 'ref'.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L147-L168
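A standalone sketch of the edge-labeling idea for a dict node (edges shown as plain tuples instead of the module's _Edge; the objects are made up). For a dict, the edge to a referenced object is labeled with the key that points at it:

from gc import get_referents

target = [1, 2, 3]
node = {'payload': target, 'size': 3}
idset = set([id(target)])
for ref in set(id(x) for x in get_referents(node)) & idset:
    label = [k for k, v in node.items() if id(v) == ref][0]
    print((id(node), ref, label))  # edge labeled 'payload'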
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._annotate_groups
def _annotate_groups(self):
    """
    Annotate the objects belonging to separate (non-connected) graphs with
    individual indices.
    """
    g = {}
    for x in self.metadata:
        g[x.id] = x

    idx = 0
    for x in self.metadata:
        if not hasattr(x, 'group'):
            x.group = idx
            idx += 1
        neighbors = set()
        for e in self.edges:
            if e.src == x.id:
                neighbors.add(e.dst)
            if e.dst == x.id:
                neighbors.add(e.src)
        for nb in neighbors:
            g[nb].group = min(x.group, getattr(g[nb], 'group', idx))

    # Assign the edges to the respective groups. Both "ends" of the edge
    # should share the same group so just use the first object's group.
    for e in self.edges:
        e.group = g[e.src].group

    self._max_group = idx
python
Annotate the objects belonging to separate (non-connected) graphs with individual indices.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L171-L199
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._filter_group
def _filter_group(self, group):
    """
    Eliminate all objects but those which belong to `group`.
    ``self.objects``, ``self.metadata`` and ``self.edges`` are modified.
    Returns `True` if the group is non-empty. Otherwise returns `False`.
    """
    self.metadata = [x for x in self.metadata if x.group == group]
    group_set = set([x.id for x in self.metadata])
    self.objects = [obj for obj in self.objects if id(obj) in group_set]
    self.count = len(self.metadata)

    if self.metadata == []:
        return False

    self.edges = [e for e in self.edges if e.group == group]

    del self._max_group

    return True
python
Eliminate all objects but those which belong to `group`. ``self.objects``, ``self.metadata`` and ``self.edges`` are modified. Returns `True` if the group is non-empty. Otherwise returns `False`.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L202-L219
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph.split
def split(self):
    """
    Split the graph into sub-graphs. Only connected objects belong to the
    same graph. `split` yields copies of the Graph object. Shallow copies
    are used that only replicate the meta-information, but share the same
    object list ``self.objects``.

    >>> from pympler.refgraph import ReferenceGraph
    >>> a = 42
    >>> b = 'spam'
    >>> c = {a: b}
    >>> t = (1,2,3)
    >>> rg = ReferenceGraph([a,b,c,t])
    >>> for subgraph in rg.split():
    ...     print subgraph.index
    0
    1
    """
    self._annotate_groups()
    index = 0

    for group in range(self._max_group):
        subgraph = copy(self)
        subgraph.metadata = self.metadata[:]
        subgraph.edges = self.edges.copy()
        if subgraph._filter_group(group):
            subgraph.total_size = sum([x.size for x in subgraph.metadata])
            subgraph.index = index
            index += 1
            yield subgraph
python
Split the graph into sub-graphs. Only connected objects belong to the same graph. `split` yields copies of the Graph object. Shallow copies are used that only replicate the meta-information, but share the same object list ``self.objects``.

>>> from pympler.refgraph import ReferenceGraph
>>> a = 42
>>> b = 'spam'
>>> c = {a: b}
>>> t = (1,2,3)
>>> rg = ReferenceGraph([a,b,c,t])
>>> for subgraph in rg.split():
...     print subgraph.index
0
1
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L222-L252
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph.split_and_sort
def split_and_sort(self):
    """
    Split the graph into sub-graphs and return a list of all graphs sorted
    by the number of nodes. The graph with the most nodes is returned first.
    """
    graphs = list(self.split())
    graphs.sort(key=lambda x: -len(x.metadata))
    for index, graph in enumerate(graphs):
        graph.index = index
    return graphs
python
Split the graph into sub-graphs and return a list of all graphs sorted by the number of nodes. The graph with the most nodes is returned first.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L255-L264
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._annotate_objects
def _annotate_objects(self):
    """
    Extract meta-data describing the stored objects.
    """
    self.metadata = []
    sizer = Asizer()
    sizes = sizer.asizesof(*self.objects)
    self.total_size = sizer.total
    for obj, sz in zip(self.objects, sizes):
        md = _MetaObject()
        md.size = sz
        md.id = id(obj)
        try:
            md.type = obj.__class__.__name__
        except (AttributeError, ReferenceError):  # pragma: no cover
            md.type = type(obj).__name__
        md.str = safe_repr(obj, clip=128)
        self.metadata.append(md)
python
Extract meta-data describing the stored objects.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L267-L284
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph._get_graphviz_data
def _get_graphviz_data(self):
    """
    Emit a graph representing the connections between the objects described
    within the metadata list. The text representation can be transformed to
    a graph with graphviz. Returns a string.
    """
    s = []
    header = '// Process this file with graphviz\n'
    s.append(header)
    s.append('digraph G {\n')
    s.append('    node [shape=box];\n')
    for md in self.metadata:
        label = trunc(md.str, 48).replace('"', "'")
        extra = ''
        if md.type == 'instancemethod':
            extra = ', color=red'
        elif md.type == 'frame':
            extra = ', color=orange'
        s.append('    "X%s" [ label = "%s\\n%s" %s ];\n' % \
            (hex(md.id)[1:], label, md.type, extra))
    for e in self.edges:
        extra = ''
        if e.label == '__dict__':
            extra = ',weight=100'
        s.append('    X%s -> X%s [label="%s"%s];\n' % \
            (hex(e.src)[1:], hex(e.dst)[1:], e.label, extra))
    s.append('}\n')
    return "".join(s)
python
Emit a graph representing the connections between the objects described within the metadata list. The text representation can be transformed to a graph with graphviz. Returns a string.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L287-L315
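For reference, a hypothetical snippet of the text this method emits (object addresses and labels made up). Node ids are object ids in hex, prefixed with X; dict edges carry the key as label:

// Process this file with graphviz
digraph G {
    node [shape=box];
    "X7f3a10" [ label = "{'k': [1, 2]}\ndict"  ];
    "X7f3b20" [ label = "[1, 2]\nlist"  ];
    X7f3a10 -> X7f3b20 [label="k"];
}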
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph.render
def render(self, filename, cmd='dot', format='ps', unflatten=False):
    """
    Render the graph to `filename` using graphviz.

    The graphviz invocation command may be overridden by specifying `cmd`.
    The `format` may be any specifier recognized by the graph renderer
    ('-Txxx' command). The graph can be preprocessed by the *unflatten*
    tool if the `unflatten` parameter is True. If there are no objects to
    illustrate, the method does not invoke graphviz and returns False. If
    the renderer returns successfully (return code 0), True is returned.

    An `OSError` is raised if the graphviz tool cannot be found.
    """
    if self.objects == []:
        return False

    data = self._get_graphviz_data()

    options = ('-Nfontsize=10',
               '-Efontsize=10',
               '-Nstyle=filled',
               '-Nfillcolor=#E5EDB8',
               '-Ncolor=#CCCCCC')
    cmdline = (cmd, '-T%s' % format, '-o', filename) + options

    if unflatten:
        p1 = Popen(('unflatten', '-l7'), stdin=PIPE, stdout=PIPE, **popen_flags)
        p2 = Popen(cmdline, stdin=p1.stdout, **popen_flags)
        p1.communicate(encode4pipe(data))
        p2.communicate()
        return p2.returncode == 0
    else:
        p = Popen(cmdline, stdin=PIPE, **popen_flags)
        p.communicate(encode4pipe(data))
        return p.returncode == 0
python
Render the graph to `filename` using graphviz. The graphviz invocation command may be overridden by specifying `cmd`. The `format` may be any specifier recognized by the graph renderer ('-Txxx' command). The graph can be preprocessed by the *unflatten* tool if the `unflatten` parameter is True. If there are no objects to illustrate, the method does not invoke graphviz and returns False. If the renderer returns successfully (return code 0), True is returned. An `OSError` is raised if the graphviz tool cannot be found.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L318-L352
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
ReferenceGraph.write_graph
def write_graph(self, filename):
    """
    Write raw graph data which can be post-processed using graphviz.
    """
    f = open(filename, 'w')
    f.write(self._get_graphviz_data())
    f.close()
python
Write raw graph data which can be post-processed using graphviz.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L355-L361
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pyinstrument/profiler.py
Profiler.root_frame
def root_frame(self):
    """
    Returns the parsed results in the form of a tree of Frame objects
    """
    if not hasattr(self, '_root_frame'):
        self._root_frame = Frame()

        # define a recursive function that builds the hierarchy of frames
        # given the stack of frame identifiers
        def frame_for_stack(stack):
            if len(stack) == 0:
                return self._root_frame

            parent = frame_for_stack(stack[:-1])
            frame_name = stack[-1]

            if frame_name not in parent.children_dict:
                parent.add_child(Frame(frame_name, parent))

            return parent.children_dict[frame_name]

        for stack, self_time in self.stack_self_time.items():
            frame_for_stack(stack).self_time = self_time

    return self._root_frame
python
Returns the parsed results in the form of a tree of Frame objects
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pyinstrument/profiler.py#L109-L133
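A standalone sketch of the same fold from flat samples to a tree, using nested dicts instead of Frame objects (the sample data is made up). Each stack tuple is walked from the root, creating missing children on the way, and the leaf receives the self time:

stack_self_time = {
    ('main',): 0.1,
    ('main', 'load'): 0.5,
    ('main', 'load', 'parse'): 0.3,
}
root = {}
for stack, self_time in stack_self_time.items():
    node = root
    for name in stack:
        node = node.setdefault(name, {})   # create the child if missing
    node['@self_time'] = self_time
print(root)  # -> {'main': {'@self_time': 0.1, 'load': {...}}}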
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
reset_trace
def reset_trace():
    """Resets all collected statistics.  This is run automatically by
    start_trace(reset=True) and when the module is loaded.
    """
    global call_dict
    global call_stack
    global func_count
    global func_count_max
    global func_time
    global func_time_max
    global call_stack_timer

    call_dict = {}

    # current call stack
    call_stack = ['__main__']

    # counters for each function
    func_count = {}
    func_count_max = 0

    # cumulative time per function
    func_time = {}
    func_time_max = 0

    # keeps track of the start time of each call on the stack
    call_stack_timer = []
python
Resets all collected statistics. This is run automatically by start_trace(reset=True) and when the module is loaded.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L92-L118
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
is_module_stdlib
def is_module_stdlib(file_name):
    """Returns True if the file_name is in the lib directory."""
    # TODO: Move these calls away from this function so it doesn't have to
    # run every time.
    lib_path = sysconfig.get_python_lib()
    path = os.path.split(lib_path)
    if path[1] == 'site-packages':
        lib_path = path[0]
    return file_name.lower().startswith(lib_path.lower())
python
Returns True if the file_name is in the lib directory.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L169-L177
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
start_trace
def start_trace(reset=True, filter_func=None, time_filter_func=None):
    """Begins a trace.  Setting reset to True will reset all previously
    recorded trace data.  filter_func needs to point to a callable function
    that accepts the parameters (call_stack, module_name, class_name,
    func_name, full_name).  Every call will be passed into this function
    and it is up to the function to decide if it should be included or not.
    Returning False means the call will be filtered out and not included in
    the call graph.
    """
    global trace_filter
    global time_filter
    if reset:
        reset_trace()

    if filter_func:
        trace_filter = filter_func
    else:
        trace_filter = GlobbingFilter(exclude=['pycallgraph.*'])

    if time_filter_func:
        time_filter = time_filter_func
    else:
        time_filter = GlobbingFilter()

    sys.settrace(tracer)
python
Begins a trace. Setting reset to True will reset all previously recorded trace data. filter_func needs to point to a callable function that accepts the parameters (call_stack, module_name, class_name, func_name, full_name). Every call will be passed into this function and it is up to the function to decide if it should be included or not. Returning False means the call will be filtered out and not included in the call graph.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L180-L203
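A minimal usage sketch of the module-level tracing API shown in this section (my_filter and do_work are made up; make_dot_graph requires the graphviz 'dot' binary on the PATH):

import pycallgraph

def my_filter(call_stack, module_name, class_name, func_name, full_name):
    # Drop logging noise; keep everything else.
    return not full_name.startswith('logging.')

def do_work():
    return sum(range(10))

pycallgraph.start_trace(filter_func=my_filter)
do_work()
pycallgraph.stop_trace()
pycallgraph.make_dot_graph('callgraph.png')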
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
tracer
def tracer(frame, event, arg):
    """This is an internal function that is called every time a call is
    made during a trace.  It keeps track of relationships between calls.
    """
    global func_count_max
    global func_count
    global trace_filter
    global time_filter
    global call_stack
    global func_time
    global func_time_max

    if event == 'call':
        keep = True
        code = frame.f_code

        # Stores all the parts of a human readable name of the current call
        full_name_list = []

        # Work out the module name
        module = inspect.getmodule(code)
        if module:
            module_name = module.__name__
            module_path = module.__file__
            if not settings['include_stdlib'] \
                and is_module_stdlib(module_path):
                keep = False
            if module_name == '__main__':
                module_name = ''
        else:
            module_name = ''
        if module_name:
            full_name_list.append(module_name)

        # Work out the class name
        try:
            class_name = frame.f_locals['self'].__class__.__name__
            full_name_list.append(class_name)
        except (KeyError, AttributeError):
            class_name = ''

        # Work out the current function or method
        func_name = code.co_name
        if func_name == '?':
            func_name = '__main__'
        full_name_list.append(func_name)

        # Create a readable representation of the current call
        full_name = '.'.join(full_name_list)

        # Load the trace filter, if any. 'keep' determines if we should
        # ignore this call
        if keep and trace_filter:
            keep = trace_filter(call_stack, module_name, class_name,
                func_name, full_name)

        # Store the call information
        if keep:
            if call_stack:
                fr = call_stack[-1]
            else:
                fr = None
            if fr not in call_dict:
                call_dict[fr] = {}
            if full_name not in call_dict[fr]:
                call_dict[fr][full_name] = 0
            call_dict[fr][full_name] += 1

            if full_name not in func_count:
                func_count[full_name] = 0
            func_count[full_name] += 1
            if func_count[full_name] > func_count_max:
                func_count_max = func_count[full_name]

            call_stack.append(full_name)
            call_stack_timer.append(time.time())
        else:
            call_stack.append('')
            call_stack_timer.append(None)

    if event == 'return':
        if call_stack:
            full_name = call_stack.pop(-1)
            if call_stack_timer:
                t = call_stack_timer.pop(-1)
            else:
                t = None
            if t and time_filter(stack=call_stack, full_name=full_name):
                if full_name not in func_time:
                    func_time[full_name] = 0
                call_time = (time.time() - t)
                func_time[full_name] += call_time
                if func_time[full_name] > func_time_max:
                    func_time_max = func_time[full_name]

    return tracer
python
This is an internal function that is called every time a call is made during a trace. It keeps track of relationships between calls.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L211-L308
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
get_dot
def get_dot(stop=True):
    """Returns a string containing a DOT file.  Setting stop to True will
    cause the trace to stop.
    """
    defaults = []
    nodes = []
    edges = []

    # define default attributes
    for comp, comp_attr in graph_attributes.items():
        attr = ', '.join('%s = "%s"' % (attr, val)
                         for attr, val in comp_attr.items())
        defaults.append('\t%(comp)s [ %(attr)s ];\n' % locals())

    # define nodes
    for func, hits in func_count.items():
        calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
        col = settings['node_colour'](calls_frac, total_time_frac)
        attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
        node_str = '"%s" [%s];' % (func, ', '.join(attribs))
        nodes.append(node_str % locals())

    # define edges
    for fr_key, fr_val in call_dict.items():
        if not fr_key:
            continue
        for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, total_time = \
                _frac_calculation(to_key, to_val)
            col = settings['edge_colour'](calls_frac, total_time_frac)
            edge = '[ color = "%s", label="%s" ]' % (col, to_val)
            edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))

    defaults = '\n\t'.join(defaults)
    nodes = '\n\t'.join(nodes)
    edges = '\n\t'.join(edges)

    dot_fmt = ("digraph G {\n"
               "    %(defaults)s\n\n"
               "    %(nodes)s\n\n"
               "    %(edges)s\n}\n")
    return dot_fmt % locals()
python
Returns a string containing a DOT file. Setting stop to True will cause the trace to stop.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L327-L369
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
get_gdf
def get_gdf(stop=True):
    """Returns a string containing a GDF file.  Setting stop to True will
    cause the trace to stop.
    """
    ret = ['nodedef>name VARCHAR, label VARCHAR, hits INTEGER, ' + \
           'calls_frac DOUBLE, total_time_frac DOUBLE, ' + \
           'total_time DOUBLE, color VARCHAR, width DOUBLE']
    for func, hits in func_count.items():
        calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
        col = settings['node_colour'](calls_frac, total_time_frac)
        color = ','.join([str(round(float(c) * 255)) for c in col.split()])
        ret.append('%s,%s,%s,%s,%s,%s,\'%s\',%s' % (func, func, hits, \
            calls_frac, total_time_frac, total_time, color, \
            math.log(hits * 10)))
    ret.append('edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR')
    for fr_key, fr_val in call_dict.items():
        if fr_key == '':
            continue
        for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, total_time = \
                _frac_calculation(to_key, to_val)
            col = settings['edge_colour'](calls_frac, total_time_frac)
            color = ','.join([str(round(float(c) * 255)) for c in col.split()])
            ret.append('%s,%s,\'%s\'' % (fr_key, to_key, color))
    return '\n'.join(ret)
python
Returns a string containing a GDF file. Setting stop to True will cause the trace to stop.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L372-L397
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
make_dot_graph
def make_dot_graph(filename, format='png', tool='dot', stop=True):
    """Creates a graph using a Graphviz tool that supports the dot language.
    It will output into a file specified by filename with the format
    specified.  Setting stop to True will stop the current trace.
    """
    if stop:
        stop_trace()

    dot_data = get_dot()

    # normalize filename
    regex_user_expand = re.compile(r'\A~')
    if regex_user_expand.match(filename):
        filename = os.path.expanduser(filename)
    else:
        filename = os.path.expandvars(filename)  # expand, just in case

    if format == 'dot':
        f = open(filename, 'w')
        f.write(dot_data)
        f.close()
    else:
        # create a temporary file to be used for the dot data
        fd, tempname = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write(dot_data)

        cmd = '%(tool)s -T%(format)s -o%(filename)s %(tempname)s' % locals()
        try:
            ret = os.system(cmd)
            if ret:
                raise PyCallGraphException( \
                    'The command "%(cmd)s" failed with error ' \
                    'code %(ret)i.' % locals())
        finally:
            os.unlink(tempname)
python
Creates a graph using a Graphviz tool that supports the dot language. It will output into a file specified by filename with the format specified. Setting stop to True will stop the current trace.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L416-L452
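Usage sketch (filenames made up; a Graphviz 'dot' binary must be on the PATH):

make_dot_graph('trace.png')                             # stop the trace, render a PNG
make_dot_graph('trace.dot', format='dot', stop=False)   # just dump the raw DOT text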
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
make_gdf_graph
def make_gdf_graph(filename, stop=True):
    """Create a graph in simple GDF format, suitable for feeding into Gephi,
    or some other graph manipulation and display tool.  Setting stop to True
    will stop the current trace.
    """
    if stop:
        stop_trace()

    f = None  # initialize so the finally clause is safe if open() fails
    try:
        f = open(filename, 'w')
        f.write(get_gdf())
    finally:
        if f:
            f.close()
python
Create a graph in simple GDF format, suitable for feeding into Gephi, or some other graph manipulation and display tool. Setting stop to True will stop the current trace.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L455-L467
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pycallgraph.py
simple_memoize
def simple_memoize(callable_object):
    """Simple memoization for functions without keyword arguments.

    This is useful for mapping code objects to modules in this context.
    inspect.getmodule() requires a number of system calls, which may slow
    down the tracing considerably.  Caching the mapping from code objects
    (there is *one* code object for each function, regardless of how many
    simultaneous activation records there are) avoids most of that cost.

    In this context we can ignore keyword arguments, but a generic memoizer
    ought to take care of those as well.
    """
    cache = dict()

    def wrapper(*rest):
        if rest not in cache:
            cache[rest] = callable_object(*rest)
        return cache[rest]

    return wrapper
python
def simple_memoize(callable_object): """Simple memoization for functions without keyword arguments. This is useful for mapping code objects to module in this context. inspect.getmodule() requires a number of system calls, which may slow down the tracing considerably. Caching the mapping from code objects (there is *one* code object for each function, regardless of how many simultaneous activations records there are). In this context we can ignore keyword arguments, but a generic memoizer ought to take care of that as well. """ cache = dict() def wrapper(*rest): if rest not in cache: cache[rest] = callable_object(*rest) return cache[rest] return wrapper
Simple memoization for functions without keyword arguments. This is useful for mapping code objects to module in this context. inspect.getmodule() requires a number of system calls, which may slow down the tracing considerably. Caching the mapping from code objects (there is *one* code object for each function, regardless of how many simultaneous activations records there are). In this context we can ignore keyword arguments, but a generic memoizer ought to take care of that as well.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L470-L490
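A short usage sketch for simple_memoize (slow_square is a hypothetical stand-in for an expensive pure function of positional arguments):

def slow_square(x):
    # stand-in for an expensive computation
    total = 0
    for _ in range(10**6):
        total = x * x
    return total

fast_square = simple_memoize(slow_square)
fast_square(12)  # computed once and cached under the key (12,)
fast_square(12)  # answered from the cache, no recomputation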
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/macshim.py
macshim
def macshim():
    """Shim to run 32-bit on 64-bit mac as a sub-process"""
    import subprocess, sys
    subprocess.call(
        [sys.argv[0] + '32'] + sys.argv[1:],
        env={"VERSIONER_PYTHON_PREFER_32_BIT": "yes"}
    )
python
def macshim():
    """Shim to run 32-bit on 64-bit mac as a sub-process"""
    import subprocess, sys
    subprocess.call(
        [sys.argv[0] + '32'] + sys.argv[1:],
        env={"VERSIONER_PYTHON_PREFER_32_BIT": "yes"}
    )
Shim to run 32-bit on 64-bit mac as a sub-process
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/macshim.py#L1-L8
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pyinstrument/__main__.py
file_supports_color
def file_supports_color(file_obj):
    """
    Returns True if the running system's terminal supports color, and False
    otherwise.

    Borrowed from Django
    https://github.com/django/django/blob/master/django/core/management/color.py
    """
    plat = sys.platform
    supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)

    is_a_tty = hasattr(file_obj, 'isatty') and file_obj.isatty()

    if not supported_platform or not is_a_tty:
        return False
    return True
python
def file_supports_color(file_obj):
    """
    Returns True if the running system's terminal supports color, and False
    otherwise.

    Borrowed from Django
    https://github.com/django/django/blob/master/django/core/management/color.py
    """
    plat = sys.platform
    supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)

    is_a_tty = hasattr(file_obj, 'isatty') and file_obj.isatty()

    if not supported_platform or not is_a_tty:
        return False
    return True
Returns True if the running system's terminal supports color, and False otherwise. Borrowed from Django https://github.com/django/django/blob/master/django/core/management/color.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pyinstrument/__main__.py#L129-L144
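A usage sketch: gate ANSI escape codes on the check above so colored output degrades gracefully when stdout is piped or the platform lacks support.

import sys

if file_supports_color(sys.stdout):
    print('\033[32mOK\033[0m')  # green on capable terminals
else:
    print('OK')                 # plain text otherwise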
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
compute_ecc_params
def compute_ecc_params(max_block_size, rate, hasher):
    '''Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is a helper function to easily compute the parameters from a resilience rate to instantiate an ECCMan object.'''
    #message_size = max_block_size - int(round(max_block_size * rate * 2, 0)) # old way to compute, wasn't really correct because we applied the rate on the total message+ecc size, when we should apply the rate to the message size only (that is not known beforehand, but we want the ecc size (k) = 2*rate*message_size or in other words that k + k * 2 * rate = n)
    message_size = int(round(float(max_block_size) / (1 + 2*rate), 0))
    ecc_size = max_block_size - message_size
    hash_size = len(hasher) # 32 when we use MD5
    return {"message_size": message_size, "ecc_size": ecc_size, "hash_size": hash_size}
python
def compute_ecc_params(max_block_size, rate, hasher):
    '''Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is a helper function to easily compute the parameters from a resilience rate to instantiate an ECCMan object.'''
    #message_size = max_block_size - int(round(max_block_size * rate * 2, 0)) # old way to compute, wasn't really correct because we applied the rate on the total message+ecc size, when we should apply the rate to the message size only (that is not known beforehand, but we want the ecc size (k) = 2*rate*message_size or in other words that k + k * 2 * rate = n)
    message_size = int(round(float(max_block_size) / (1 + 2*rate), 0))
    ecc_size = max_block_size - message_size
    hash_size = len(hasher) # 32 when we use MD5
    return {"message_size": message_size, "ecc_size": ecc_size, "hash_size": hash_size}
Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is a helper function to easily compute the parameters from a resilience rate to instantiate an ECCMan object.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L48-L54
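A worked example of the formula above: with max_block_size=255 and rate=0.3, message_size = round(255 / (1 + 2*0.3)) = round(159.375) = 159 and ecc_size = 255 - 159 = 96. The hasher argument only needs a len(); a 32-character MD5 hex digest stand-in is used here:

params = compute_ecc_params(max_block_size=255, rate=0.3, hasher='0' * 32)
assert params == {"message_size": 159, "ecc_size": 96, "hash_size": 32}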
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
detect_reedsolomon_parameters
def detect_reedsolomon_parameters(message, mesecc_orig, gen_list=[2, 3, 5], c_exp=8):
    '''Use an exhaustive search to automatically find the correct parameters for the ReedSolomon codec from a sample message and its encoded RS code.
    Arguments: message is the sample message, eg, "hello world" ; mesecc_orig is the message variable encoded with RS block appended at the end.
    '''
    # Description: this is basically an exhaustive search where we will try every possible RS parameter, then try to encode the sample message, and see if the resulting RS code is close to the supplied code.
    # All variables except the Galois Field's exponent are automatically generated and searched.
    # To compare with the supplied RS code, we compute the Hamming distance, so that even if the RS code is tampered, we can still find the closest set of RS parameters to decode this message.
    # The goal is to provide users a function so that they can use the "hello world" sample string in generated ECC files to recover their RS parameters in case they forget them. But users can use any sample message: for example, if they have an untampered file and its relative ecc track, they can use the ecc track as the mesecc_orig and their original file as the sample message.
    from .reedsolomon import reedsolo as reedsolop  # need to import the reedsolo library a second time for detect_reedsolomon_parameters to work (because we need to reinit all the tables, and they are declared module-wide, so this would conflict with decoding)

    # Init the variables
    n = len(mesecc_orig)
    k = len(message)
    field_charac = int((2**c_exp) - 1)
    maxval1 = max([ord(x) if isinstance(x, basestring) else x for x in message])
    maxval2 = max([ord(x) if isinstance(x, basestring) else x for x in mesecc_orig])
    maxval = max([maxval1, maxval2])
    if (maxval > field_charac):
        raise ValueError("The specified field's exponent is wrong, the message contains values (%i) above the field's cardinality (%i)!" % (maxval, field_charac))

    # Prepare the variable that will store the result
    best_match = {"hscore": -1, "params": [{"gen_nb": 0, "prim": 0, "fcr": 0}]}

    # Exhaustively search by generating every combination of values for the RS parameters and test the Hamming distance
    for gen_nb in gen_list:
        prim_list = reedsolop.find_prime_polys(generator=gen_nb, c_exp=c_exp, fast_primes=False, single=False)
        for prim in prim_list:
            reedsolop.init_tables(prim)
            for fcr in xrange(field_charac):
                #g = reedsolop.rs_generator_poly_all(n, fcr=fcr, generator=gen_nb)
                # Generate a RS code from the sample message using the current combination of RS parameters
                mesecc = reedsolop.rs_encode_msg(message, n-k, fcr=fcr)
                # Compute the Hamming distance
                h = hamming(mesecc, mesecc_orig)
                # If the Hamming distance is lower than the previous best match (or if it's the first try), save this set of parameters
                if best_match["hscore"] == -1 or h <= best_match["hscore"]:
                    # If the distance is strictly lower than for the previous match, then we replace the previous match with the current one
                    if best_match["hscore"] == -1 or h < best_match["hscore"]:
                        best_match["hscore"] = h
                        best_match["params"] = [{"gen_nb": gen_nb, "prim": prim, "fcr": fcr}]
                    # Else there is an ambiguity: the Hamming distance is the same as for the previous best match, so we keep the previous set of parameters but we append the current set
                    elif h == best_match["hscore"]:
                        best_match["params"].append({"gen_nb": gen_nb, "prim": prim, "fcr": fcr})
                    # If Hamming distance is 0, then we have found a perfect match (the current set of parameters allows generating the exact same RS code from the sample message), so we stop here
                    if h == 0:
                        break

    # Printing the results to the user
    if best_match["hscore"] >= 0 and best_match["hscore"] < len(mesecc_orig):
        perfect_match_str = " (0=perfect match)" if best_match["hscore"] == 0 else ""
        result = ''
        result += "Found closest set of parameters, with Hamming distance %i%s:\n" % (best_match["hscore"], perfect_match_str)
        for param in best_match["params"]:
            result += "gen_nb=%s prim=%s(%s) fcr=%s\n" % (param["gen_nb"], param["prim"], hex(param["prim"]), param["fcr"])
        return result
    else:
        return "Parameters could not be automatically detected..."
python
def detect_reedsolomon_parameters(message, mesecc_orig, gen_list=[2, 3, 5], c_exp=8):
    '''Use an exhaustive search to automatically find the correct parameters for the ReedSolomon codec from a sample message and its encoded RS code.
    Arguments: message is the sample message, eg, "hello world" ; mesecc_orig is the message variable encoded with RS block appended at the end.
    '''
    # Description: this is basically an exhaustive search where we will try every possible RS parameter, then try to encode the sample message, and see if the resulting RS code is close to the supplied code.
    # All variables except the Galois Field's exponent are automatically generated and searched.
    # To compare with the supplied RS code, we compute the Hamming distance, so that even if the RS code is tampered, we can still find the closest set of RS parameters to decode this message.
    # The goal is to provide users a function so that they can use the "hello world" sample string in generated ECC files to recover their RS parameters in case they forget them. But users can use any sample message: for example, if they have an untampered file and its relative ecc track, they can use the ecc track as the mesecc_orig and their original file as the sample message.
    from .reedsolomon import reedsolo as reedsolop  # need to import the reedsolo library a second time for detect_reedsolomon_parameters to work (because we need to reinit all the tables, and they are declared module-wide, so this would conflict with decoding)

    # Init the variables
    n = len(mesecc_orig)
    k = len(message)
    field_charac = int((2**c_exp) - 1)
    maxval1 = max([ord(x) if isinstance(x, basestring) else x for x in message])
    maxval2 = max([ord(x) if isinstance(x, basestring) else x for x in mesecc_orig])
    maxval = max([maxval1, maxval2])
    if (maxval > field_charac):
        raise ValueError("The specified field's exponent is wrong, the message contains values (%i) above the field's cardinality (%i)!" % (maxval, field_charac))

    # Prepare the variable that will store the result
    best_match = {"hscore": -1, "params": [{"gen_nb": 0, "prim": 0, "fcr": 0}]}

    # Exhaustively search by generating every combination of values for the RS parameters and test the Hamming distance
    for gen_nb in gen_list:
        prim_list = reedsolop.find_prime_polys(generator=gen_nb, c_exp=c_exp, fast_primes=False, single=False)
        for prim in prim_list:
            reedsolop.init_tables(prim)
            for fcr in xrange(field_charac):
                #g = reedsolop.rs_generator_poly_all(n, fcr=fcr, generator=gen_nb)
                # Generate a RS code from the sample message using the current combination of RS parameters
                mesecc = reedsolop.rs_encode_msg(message, n-k, fcr=fcr)
                # Compute the Hamming distance
                h = hamming(mesecc, mesecc_orig)
                # If the Hamming distance is lower than the previous best match (or if it's the first try), save this set of parameters
                if best_match["hscore"] == -1 or h <= best_match["hscore"]:
                    # If the distance is strictly lower than for the previous match, then we replace the previous match with the current one
                    if best_match["hscore"] == -1 or h < best_match["hscore"]:
                        best_match["hscore"] = h
                        best_match["params"] = [{"gen_nb": gen_nb, "prim": prim, "fcr": fcr}]
                    # Else there is an ambiguity: the Hamming distance is the same as for the previous best match, so we keep the previous set of parameters but we append the current set
                    elif h == best_match["hscore"]:
                        best_match["params"].append({"gen_nb": gen_nb, "prim": prim, "fcr": fcr})
                    # If Hamming distance is 0, then we have found a perfect match (the current set of parameters allows generating the exact same RS code from the sample message), so we stop here
                    if h == 0:
                        break

    # Printing the results to the user
    if best_match["hscore"] >= 0 and best_match["hscore"] < len(mesecc_orig):
        perfect_match_str = " (0=perfect match)" if best_match["hscore"] == 0 else ""
        result = ''
        result += "Found closest set of parameters, with Hamming distance %i%s:\n" % (best_match["hscore"], perfect_match_str)
        for param in best_match["params"]:
            result += "gen_nb=%s prim=%s(%s) fcr=%s\n" % (param["gen_nb"], param["prim"], hex(param["prim"]), param["fcr"])
        return result
    else:
        return "Parameters could not be automatically detected..."
Use an exhaustive search to automatically find the correct parameters for the ReedSolomon codec from a sample message and its encoded RS code. Arguments: message is the sample message, eg, "hello world" ; mesecc_orig is the message variable encoded with RS block appended at the end.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L56-L112
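detect_reedsolomon_parameters relies on a hamming() helper that is not shown in these records; a minimal sketch consistent with how it is called (element-wise distance over two equal-length byte sequences) could look like this, though the actual helper in pyFileFixity may differ:

def hamming(seq1, seq2):
    '''Count the positions at which the two sequences differ.'''
    return sum(x != y for x, y in zip(seq1, seq2))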
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.encode
def encode(self, message, k=None):
    '''Encode one message block (up to 255) into an ecc'''
    if not k:
        k = self.k
    message, _ = self.pad(message, k=k)
    if self.algo == 1:
        mesecc = self.ecc_manager.encode(message, k=k)
    elif self.algo == 2:
        mesecc = self.ecc_manager.encode_fast(message, k=k)
    elif self.algo == 3 or self.algo == 4:
        mesecc = rs_encode_msg(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])
        #mesecc = rs_encode_msg_precomp(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])

    ecc = mesecc[len(message):]
    return ecc
python
def encode(self, message, k=None):
    '''Encode one message block (up to 255) into an ecc'''
    if not k:
        k = self.k
    message, _ = self.pad(message, k=k)
    if self.algo == 1:
        mesecc = self.ecc_manager.encode(message, k=k)
    elif self.algo == 2:
        mesecc = self.ecc_manager.encode_fast(message, k=k)
    elif self.algo == 3 or self.algo == 4:
        mesecc = rs_encode_msg(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])
        #mesecc = rs_encode_msg_precomp(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])

    ecc = mesecc[len(message):]
    return ecc
Encode one message block (up to 255) into an ecc
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L153-L166
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.decode
def decode(self, message, ecc, k=None, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    '''Repair a message and its ecc also, given the message and its ecc (both can be corrupted, we will still try to fix both of them)'''
    if not k:
        k = self.k

    # Optimization, use bytearray
    if isinstance(message, _str):
        message = bytearray([ord(x) for x in message])
        ecc = bytearray([ord(x) for x in ecc])

    # Detect erasures positions and replace with null bytes (replacing erasures with null bytes is necessary for correct syndrome computation)
    # Note that this must be done before padding, else we risk counting the padded null bytes as erasures!
    erasures_pos = None
    if enable_erasures:
        # Concatenate to find erasures in the whole codeword
        mesecc = message + ecc
        # Convert char to an int (because we use a bytearray)
        if isinstance(erasures_char, _str):
            erasures_char = ord(erasures_char)
        # Find the positions of the erased characters
        erasures_pos = [i for i in xrange(len(mesecc)) if mesecc[i] == erasures_char]
        # Failing case: no erasures could be found and we want to only correct erasures, then we return the message as-is
        if only_erasures and not erasures_pos:
            return message, ecc

    # Pad with null bytes if necessary
    message, pad = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)  # fill ecc with null bytes if too small (maybe the field delimiters were misdetected and this truncated the ecc? But we maybe still can correct if the truncation is less than the resilience rate)

    # If the message was left padded, then we need to update the positions of the erasures
    if erasures_pos and pad:
        len_pad = len(pad)
        erasures_pos = [x + len_pad for x in erasures_pos]

    # Decoding
    if self.algo == 1:
        msg_repaired, ecc_repaired = self.ecc_manager.decode(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)  # Avoid automatic stripping because we are working with binary streams, thus we should manually strip padding only when we know we padded
    elif self.algo == 2:
        msg_repaired, ecc_repaired = self.ecc_manager.decode_fast(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)
    elif self.algo == 3:
        #msg_repaired, ecc_repaired = self.ecc_manager.decode_fast(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired, ecc_repaired = reedsolo.rs_correct_msg_nofsynd(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb, erase_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired = bytearray(msg_repaired)
        ecc_repaired = bytearray(ecc_repaired)
    elif self.algo == 4:
        msg_repaired, ecc_repaired = reedsolo.rs_correct_msg(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb, erase_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired = bytearray(msg_repaired)
        ecc_repaired = bytearray(ecc_repaired)

    if pad:  # Strip the null bytes if we padded the message before decoding
        msg_repaired = msg_repaired[len(pad):len(msg_repaired)]

    return msg_repaired, ecc_repaired
python
def decode(self, message, ecc, k=None, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    '''Repair a message and its ecc also, given the message and its ecc (both can be corrupted, we will still try to fix both of them)'''
    if not k:
        k = self.k

    # Optimization, use bytearray
    if isinstance(message, _str):
        message = bytearray([ord(x) for x in message])
        ecc = bytearray([ord(x) for x in ecc])

    # Detect erasures positions and replace with null bytes (replacing erasures with null bytes is necessary for correct syndrome computation)
    # Note that this must be done before padding, else we risk counting the padded null bytes as erasures!
    erasures_pos = None
    if enable_erasures:
        # Concatenate to find erasures in the whole codeword
        mesecc = message + ecc
        # Convert char to an int (because we use a bytearray)
        if isinstance(erasures_char, _str):
            erasures_char = ord(erasures_char)
        # Find the positions of the erased characters
        erasures_pos = [i for i in xrange(len(mesecc)) if mesecc[i] == erasures_char]
        # Failing case: no erasures could be found and we want to only correct erasures, then we return the message as-is
        if only_erasures and not erasures_pos:
            return message, ecc

    # Pad with null bytes if necessary
    message, pad = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)  # fill ecc with null bytes if too small (maybe the field delimiters were misdetected and this truncated the ecc? But we maybe still can correct if the truncation is less than the resilience rate)

    # If the message was left padded, then we need to update the positions of the erasures
    if erasures_pos and pad:
        len_pad = len(pad)
        erasures_pos = [x + len_pad for x in erasures_pos]

    # Decoding
    if self.algo == 1:
        msg_repaired, ecc_repaired = self.ecc_manager.decode(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)  # Avoid automatic stripping because we are working with binary streams, thus we should manually strip padding only when we know we padded
    elif self.algo == 2:
        msg_repaired, ecc_repaired = self.ecc_manager.decode_fast(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)
    elif self.algo == 3:
        #msg_repaired, ecc_repaired = self.ecc_manager.decode_fast(message + ecc, nostrip=True, k=k, erasures_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired, ecc_repaired = reedsolo.rs_correct_msg_nofsynd(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb, erase_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired = bytearray(msg_repaired)
        ecc_repaired = bytearray(ecc_repaired)
    elif self.algo == 4:
        msg_repaired, ecc_repaired = reedsolo.rs_correct_msg(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb, erase_pos=erasures_pos, only_erasures=only_erasures)
        msg_repaired = bytearray(msg_repaired)
        ecc_repaired = bytearray(ecc_repaired)

    if pad:  # Strip the null bytes if we padded the message before decoding
        msg_repaired = msg_repaired[len(pad):len(msg_repaired)]

    return msg_repaired, ecc_repaired
Repair a message and its ecc also, given the message and its ecc (both can be corrupted, we will still try to fix both of them)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L168-L216
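A hedged round-trip sketch of encode/decode; the ECCMan constructor signature is not shown in these records, so ECCMan(n, k, algo=...) below is an assumption, and the values are illustrative only:

ecc_man = ECCMan(255, 223, algo=3)          # assumed constructor
message = bytearray(b'some data block')
ecc = ecc_man.encode(message)               # compute the ecc for the block
# ... the stored message gets corrupted ...
repaired, ecc_repaired = ecc_man.decode(message, ecc)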
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.pad
def pad(self, message, k=None):
    '''Automatically left pad with null bytes a message if too small, or leave unchanged if not necessary. This allows to keep track of padding and strip the null bytes after decoding reliably with binary data. Equivalent to shortening (shortened reed-solomon code).'''
    if not k:
        k = self.k
    pad = None
    if len(message) < k:
        #pad = "\x00" * (k-len(message))
        pad = bytearray(k - len(message))
        message = pad + message
    return [message, pad]
python
def pad(self, message, k=None):
    '''Automatically left pad with null bytes a message if too small, or leave unchanged if not necessary. This allows to keep track of padding and strip the null bytes after decoding reliably with binary data. Equivalent to shortening (shortened reed-solomon code).'''
    if not k:
        k = self.k
    pad = None
    if len(message) < k:
        #pad = "\x00" * (k-len(message))
        pad = bytearray(k - len(message))
        message = pad + message
    return [message, pad]
Automatically left pad with null bytes a message if too small, or leave unchanged if not necessary. This allows to keep track of padding and strip the null bytes after decoding reliably with binary data. Equivalent to shortening (shortened reed-solomon code).
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L218-L226
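An illustration of the left-padding (shortened code) behavior above, with k assumed to be 16 for the example:

k = 16
message = bytearray(b'hello')
pad = bytearray(k - len(message))   # 11 null bytes
padded = pad + message              # null bytes go on the left
assert len(padded) == 16
# After decoding, len(pad) is known, so the original message is
# recovered exactly with padded[len(pad):].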
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.rpad
def rpad(self, ecc, k=None):
    '''Automatically right pad with null bytes an ecc to fill for missing bytes if too small, or leave unchanged if not necessary. This can be used as a workaround for field delimiter misdetection. Equivalent to puncturing (punctured reed-solomon code).'''
    if not k:
        k = self.k
    pad = None
    if len(ecc) < self.n - k:
        print("Warning: the ecc field may have been truncated (entrymarker or field_delim misdetection?).")
        #pad = "\x00" * (self.n-k-len(ecc))
        pad = bytearray(self.n - k - len(ecc))
        ecc = ecc + pad
    return [ecc, pad]
python
def rpad(self, ecc, k=None):
    '''Automatically right pad with null bytes an ecc to fill for missing bytes if too small, or leave unchanged if not necessary. This can be used as a workaround for field delimiter misdetection. Equivalent to puncturing (punctured reed-solomon code).'''
    if not k:
        k = self.k
    pad = None
    if len(ecc) < self.n - k:
        print("Warning: the ecc field may have been truncated (entrymarker or field_delim misdetection?).")
        #pad = "\x00" * (self.n-k-len(ecc))
        pad = bytearray(self.n - k - len(ecc))
        ecc = ecc + pad
    return [ecc, pad]
Automatically right pad with null bytes an ecc to fill for missing bytes if too small, or leave unchanged if not necessary. This can be used as a workaround for field delimiter misdetection. Equivalent to puncturing (punctured reed-solomon code).
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L228-L237
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.check
def check(self, message, ecc, k=None):
    '''Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.'''
    if not k:
        k = self.k
    message, _ = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)
    if self.algo == 1 or self.algo == 2:
        return self.ecc_manager.check_fast(message + ecc, k=k)
    elif self.algo == 3 or self.algo == 4:
        return reedsolo.rs_check(bytearray(message + ecc), self.n - k, fcr=self.fcr, generator=self.gen_nb)
python
def check(self, message, ecc, k=None):
    '''Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.'''
    if not k:
        k = self.k
    message, _ = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)
    if self.algo == 1 or self.algo == 2:
        return self.ecc_manager.check_fast(message + ecc, k=k)
    elif self.algo == 3 or self.algo == 4:
        return reedsolo.rs_check(bytearray(message + ecc), self.n - k, fcr=self.fcr, generator=self.gen_nb)
Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L239-L247
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
ECCMan.description
def description(self):
    '''Provide a description for each algorithm available, useful to print in ecc file'''
    if 0 < self.algo <= 3:
        return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
    elif self.algo == 4:
        return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) under US FAA ADSB UAT RS FEC standard with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
    else:
        return "No description for this ECC algorithm."
python
def description(self):
    '''Provide a description for each algorithm available, useful to print in ecc file'''
    if 0 < self.algo <= 3:
        return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
    elif self.algo == 4:
        return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) under US FAA ADSB UAT RS FEC standard with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
    else:
        return "No description for this ECC algorithm."
Provide a description for each algorithm available, useful to print in ecc file
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L249-L256
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
profile
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
            sort=None, entries=40,
            profiler=('cProfile', 'profile', 'hotshot')):
    """Mark `fn` for profiling.

    If `skip` is > 0, first `skip` calls to `fn` will not be profiled.

    If `immediate` is False, profiling results will be printed to
    sys.stdout on program termination.  Otherwise results will be printed
    after each call.

    If `dirs` is False only the name of the file will be printed.
    Otherwise the full path is used.

    `sort` can be a list of sort keys (defaulting to ['cumulative',
    'time', 'calls']).  The following ones are recognized::

        'calls'      -- call count
        'cumulative' -- cumulative time
        'file'       -- file name
        'line'       -- line number
        'module'     -- file name
        'name'       -- function name
        'nfl'        -- name/file/line
        'pcalls'     -- call count
        'stdname'    -- standard name
        'time'       -- internal time

    `entries` limits the output to the first N entries.

    `profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference.  The default is ('cProfile',
    'profile', 'hotshot').

    If `filename` is specified, the profile stats will be stored in the
    named file.  You can load them with pstats.Stats(filename).

    Usage::

        def fn(...):
            ...
        fn = profile(fn, skip=1)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @profile(skip=3)
        def fn(...):
            ...

    or just ::

        @profile
        def fn(...):
            ...

    """
    if fn is None:  # @profile() syntax -- we are a decorator maker
        def decorator(fn):
            return profile(fn, skip=skip, filename=filename,
                           immediate=immediate, dirs=dirs,
                           sort=sort, entries=entries,
                           profiler=profiler)
        return decorator
    # @profile syntax -- we are a decorator.
    if isinstance(profiler, str):
        profiler = [profiler]
    for p in profiler:
        if p in AVAILABLE_PROFILERS:
            profiler_class = AVAILABLE_PROFILERS[p]
            break
    else:
        raise ValueError('only these profilers are available: %s'
                         % ', '.join(AVAILABLE_PROFILERS))
    fp = profiler_class(fn, skip=skip, filename=filename,
                        immediate=immediate, dirs=dirs,
                        sort=sort, entries=entries)
    # fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...)  # or HotShotFuncProfile
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
python
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
            sort=None, entries=40,
            profiler=('cProfile', 'profile', 'hotshot')):
    """Mark `fn` for profiling.

    If `skip` is > 0, first `skip` calls to `fn` will not be profiled.

    If `immediate` is False, profiling results will be printed to
    sys.stdout on program termination.  Otherwise results will be printed
    after each call.

    If `dirs` is False only the name of the file will be printed.
    Otherwise the full path is used.

    `sort` can be a list of sort keys (defaulting to ['cumulative',
    'time', 'calls']).  The following ones are recognized::

        'calls'      -- call count
        'cumulative' -- cumulative time
        'file'       -- file name
        'line'       -- line number
        'module'     -- file name
        'name'       -- function name
        'nfl'        -- name/file/line
        'pcalls'     -- call count
        'stdname'    -- standard name
        'time'       -- internal time

    `entries` limits the output to the first N entries.

    `profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference.  The default is ('cProfile',
    'profile', 'hotshot').

    If `filename` is specified, the profile stats will be stored in the
    named file.  You can load them with pstats.Stats(filename).

    Usage::

        def fn(...):
            ...
        fn = profile(fn, skip=1)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @profile(skip=3)
        def fn(...):
            ...

    or just ::

        @profile
        def fn(...):
            ...

    """
    if fn is None:  # @profile() syntax -- we are a decorator maker
        def decorator(fn):
            return profile(fn, skip=skip, filename=filename,
                           immediate=immediate, dirs=dirs,
                           sort=sort, entries=entries,
                           profiler=profiler)
        return decorator
    # @profile syntax -- we are a decorator.
    if isinstance(profiler, str):
        profiler = [profiler]
    for p in profiler:
        if p in AVAILABLE_PROFILERS:
            profiler_class = AVAILABLE_PROFILERS[p]
            break
    else:
        raise ValueError('only these profilers are available: %s'
                         % ', '.join(AVAILABLE_PROFILERS))
    fp = profiler_class(fn, skip=skip, filename=filename,
                        immediate=immediate, dirs=dirs,
                        sort=sort, entries=entries)
    # fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...)  # or HotShotFuncProfile
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
Mark `fn` for profiling. If `skip` is > 0, first `skip` calls to `fn` will not be profiled. If `immediate` is False, profiling results will be printed to sys.stdout on program termination. Otherwise results will be printed after each call. If `dirs` is False only the name of the file will be printed. Otherwise the full path is used. `sort` can be a list of sort keys (defaulting to ['cumulative', 'time', 'calls']). The following ones are recognized:: 'calls' -- call count 'cumulative' -- cumulative time 'file' -- file name 'line' -- line number 'module' -- file name 'name' -- function name 'nfl' -- name/file/line 'pcalls' -- call count 'stdname' -- standard name 'time' -- internal time `entries` limits the output to the first N entries. `profiler` can be used to select the preferred profiler, or specify a sequence of them, in order of preference. The default is ('cProfile', 'profile', 'hotshot'). If `filename` is specified, the profile stats will be stored in the named file. You can load them with pstats.Stats(filename). Usage:: def fn(...): ... fn = profile(fn, skip=1) If you are using Python 2.4, you should be able to use the decorator syntax:: @profile(skip=3) def fn(...): ... or just :: @profile def fn(...): ...
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L137-L224
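A usage sketch of the decorator-maker path (@profile(...)), deferring output to program termination; my_task is hypothetical:

@profile(immediate=False, sort=['cumulative'], entries=20)
def my_task(n):
    return sum(i * i for i in range(n))

my_task(100000)  # stats are printed to sys.stdout at exit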
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
coverage
def coverage(fn):
    """Mark `fn` for line coverage analysis.

    Results will be printed to sys.stdout on program termination.

    Usage::

        def fn(...):
            ...
        fn = coverage(fn)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @coverage
        def fn(...):
            ...

    """
    fp = TraceFuncCoverage(fn)  # or HotShotFuncCoverage
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
python
def coverage(fn):
    """Mark `fn` for line coverage analysis.

    Results will be printed to sys.stdout on program termination.

    Usage::

        def fn(...):
            ...
        fn = coverage(fn)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @coverage
        def fn(...):
            ...

    """
    fp = TraceFuncCoverage(fn)  # or HotShotFuncCoverage
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
Mark `fn` for line coverage analysis. Results will be printed to sys.stdout on program termination. Usage:: def fn(...): ... fn = coverage(fn) If you are using Python 2.4, you should be able to use the decorator syntax:: @coverage def fn(...): ...
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L227-L255
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
coverage_with_hotshot
def coverage_with_hotshot(fn):
    """Mark `fn` for line coverage analysis.

    Uses the 'hotshot' module for fast coverage analysis.

    BUG: Produces inaccurate results.

    See the docstring of `coverage` for usage examples.
    """
    fp = HotShotFuncCoverage(fn)
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
python
def coverage_with_hotshot(fn):
    """Mark `fn` for line coverage analysis.

    Uses the 'hotshot' module for fast coverage analysis.

    BUG: Produces inaccurate results.

    See the docstring of `coverage` for usage examples.
    """
    fp = HotShotFuncCoverage(fn)
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
Mark `fn` for line coverage analysis. Uses the 'hotshot' module for fast coverage analysis. BUG: Produces inaccurate results. See the docstring of `coverage` for usage examples.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L258-L276
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
timecall
def timecall(fn=None, immediate=True, timer=time.time):
    """Wrap `fn` and print its execution time.

    Example::

        @timecall
        def somefunc(x, y):
            time.sleep(x * y)

        somefunc(2, 3)

    will print the time taken by somefunc on every call.  If you want just
    a summary at program termination, use

        @timecall(immediate=False)

    You can also choose a timing method other than the default
    ``time.time()``, e.g.:

        @timecall(timer=time.clock)

    """
    if fn is None:  # @timecall() syntax -- we are a decorator maker
        def decorator(fn):
            return timecall(fn, immediate=immediate, timer=timer)
        return decorator
    # @timecall syntax -- we are a decorator.
    fp = FuncTimer(fn, immediate=immediate, timer=timer)
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
python
def timecall(fn=None, immediate=True, timer=time.time):
    """Wrap `fn` and print its execution time.

    Example::

        @timecall
        def somefunc(x, y):
            time.sleep(x * y)

        somefunc(2, 3)

    will print the time taken by somefunc on every call.  If you want just
    a summary at program termination, use

        @timecall(immediate=False)

    You can also choose a timing method other than the default
    ``time.time()``, e.g.:

        @timecall(timer=time.clock)

    """
    if fn is None:  # @timecall() syntax -- we are a decorator maker
        def decorator(fn):
            return timecall(fn, immediate=immediate, timer=timer)
        return decorator
    # @timecall syntax -- we are a decorator.
    fp = FuncTimer(fn, immediate=immediate, timer=timer)
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
Wrap `fn` and print its execution time. Example:: @timecall def somefunc(x, y): time.sleep(x * y) somefunc(2, 3) will print the time taken by somefunc on every call. If you want just a summary at program termination, use @timecall(immediate=False) You can also choose a timing method other than the default ``time.time()``, e.g.: @timecall(timer=time.clock)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L655-L691
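A usage sketch covering both forms from the docstring: bare @timecall for per-call timing, and @timecall(immediate=False) for a single summary at exit.

import time

@timecall                   # prints elapsed time on every call
def pause(seconds):
    time.sleep(seconds)

@timecall(immediate=False)  # one summary at program termination
def quick():
    pass

pause(0.1)
quick()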
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
FuncProfile.print_stats
def print_stats(self):
    """Print profile information to sys.stdout."""
    funcname = self.fn.__name__
    filename = self.fn.__code__.co_filename
    lineno = self.fn.__code__.co_firstlineno
    print("")
    print("*** PROFILER RESULTS ***")
    print("%s (%s:%s)" % (funcname, filename, lineno))
    if self.skipped:
        skipped = "(%d calls not profiled)" % self.skipped
    else:
        skipped = ""
    print("function called %d times%s" % (self.ncalls, skipped))
    print("")
    stats = self.stats
    if self.filename:
        stats.dump_stats(self.filename)
    if not self.dirs:
        stats.strip_dirs()
    stats.sort_stats(*self.sort)
    stats.print_stats(self.entries)
python
def print_stats(self):
    """Print profile information to sys.stdout."""
    funcname = self.fn.__name__
    filename = self.fn.__code__.co_filename
    lineno = self.fn.__code__.co_firstlineno
    print("")
    print("*** PROFILER RESULTS ***")
    print("%s (%s:%s)" % (funcname, filename, lineno))
    if self.skipped:
        skipped = "(%d calls not profiled)" % self.skipped
    else:
        skipped = ""
    print("function called %d times%s" % (self.ncalls, skipped))
    print("")
    stats = self.stats
    if self.filename:
        stats.dump_stats(self.filename)
    if not self.dirs:
        stats.strip_dirs()
    stats.sort_stats(*self.sort)
    stats.print_stats(self.entries)
Print profile information to sys.stdout.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L332-L352
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
FuncProfile.reset_stats
def reset_stats(self):
    """Reset accumulated profiler statistics."""
    # Note: not using self.Profile, since pstats.Stats() fails then
    self.stats = pstats.Stats(Profile())
    self.ncalls = 0
    self.skipped = 0
python
def reset_stats(self):
    """Reset accumulated profiler statistics."""
    # Note: not using self.Profile, since pstats.Stats() fails then
    self.stats = pstats.Stats(Profile())
    self.ncalls = 0
    self.skipped = 0
Reset accumulated profiler statistics.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L354-L359
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
TraceFuncCoverage.atexit
def atexit(self):
    """Stop profiling and print profile information to sys.stderr.

    This function is registered as an atexit hook.
    """
    funcname = self.fn.__name__
    filename = self.fn.__code__.co_filename
    lineno = self.fn.__code__.co_firstlineno
    print("")
    print("*** COVERAGE RESULTS ***")
    print("%s (%s:%s)" % (funcname, filename, lineno))
    print("function called %d times" % self.ncalls)
    print("")
    fs = FuncSource(self.fn)
    for (filename, lineno), count in self.tracer.counts.items():
        if filename != fs.filename:
            continue
        fs.mark(lineno, count)
    print(fs)
    never_executed = fs.count_never_executed()
    if never_executed:
        print("%d lines were not executed." % never_executed)
python
def atexit(self):
    """Stop profiling and print profile information to sys.stderr.

    This function is registered as an atexit hook.
    """
    funcname = self.fn.__name__
    filename = self.fn.__code__.co_filename
    lineno = self.fn.__code__.co_firstlineno
    print("")
    print("*** COVERAGE RESULTS ***")
    print("%s (%s:%s)" % (funcname, filename, lineno))
    print("function called %d times" % self.ncalls)
    print("")
    fs = FuncSource(self.fn)
    for (filename, lineno), count in self.tracer.counts.items():
        if filename != fs.filename:
            continue
        fs.mark(lineno, count)
    print(fs)
    never_executed = fs.count_never_executed()
    if never_executed:
        print("%d lines were not executed." % never_executed)
Stop profiling and print profile information to sys.stderr. This function is registered as an atexit hook.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L569-L590
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
FuncSource.find_source_lines
def find_source_lines(self):
    """Mark all executable source lines in fn as executed 0 times."""
    strs = trace.find_strings(self.filename)
    lines = trace.find_lines_from_code(self.fn.__code__, strs)
    self.firstcodelineno = sys.maxint
    for lineno in lines:
        self.firstcodelineno = min(self.firstcodelineno, lineno)
        self.sourcelines.setdefault(lineno, 0)
    if self.firstcodelineno == sys.maxint:
        self.firstcodelineno = self.firstlineno
python
def find_source_lines(self):
    """Mark all executable source lines in fn as executed 0 times."""
    strs = trace.find_strings(self.filename)
    lines = trace.find_lines_from_code(self.fn.__code__, strs)
    self.firstcodelineno = sys.maxint
    for lineno in lines:
        self.firstcodelineno = min(self.firstcodelineno, lineno)
        self.sourcelines.setdefault(lineno, 0)
    if self.firstcodelineno == sys.maxint:
        self.firstcodelineno = self.firstlineno
Mark all executable source lines in fn as executed 0 times.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L606-L615
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
FuncSource.mark
def mark(self, lineno, count=1):
    """Mark a given source line as executed count times.

    Multiple calls to mark for the same lineno add up.
    """
    self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
python
def mark(self, lineno, count=1):
    """Mark a given source line as executed count times.

    Multiple calls to mark for the same lineno add up.
    """
    self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
Mark a given source line as executed count times. Multiple calls to mark for the same lineno add up.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L617-L622
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
FuncSource.count_never_executed
def count_never_executed(self):
    """Count statements that were never executed."""
    lineno = self.firstlineno
    counter = 0
    for line in self.source:
        if self.sourcelines.get(lineno) == 0:
            if not self.blank_rx.match(line):
                counter += 1
        lineno += 1
    return counter
python
def count_never_executed(self):
    """Count statements that were never executed."""
    lineno = self.firstlineno
    counter = 0
    for line in self.source:
        if self.sourcelines.get(lineno) == 0:
            if not self.blank_rx.match(line):
                counter += 1
        lineno += 1
    return counter
Count statements that were never executed.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L624-L633
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedList.add
def add(self, val):
    """Add the element *val* to the list."""
    _maxes, _lists = self._maxes, self._lists

    if _maxes:
        pos = bisect_right(_maxes, val)

        if pos == len(_maxes):
            pos -= 1
            _maxes[pos] = val
            _lists[pos].append(val)
        else:
            insort(_lists[pos], val)

        self._expand(pos)
    else:
        _maxes.append(val)
        _lists.append([val])

    self._len += 1
python
def add(self, val):
    """Add the element *val* to the list."""
    _maxes, _lists = self._maxes, self._lists

    if _maxes:
        pos = bisect_right(_maxes, val)

        if pos == len(_maxes):
            pos -= 1
            _maxes[pos] = val
            _lists[pos].append(val)
        else:
            insort(_lists[pos], val)

        self._expand(pos)
    else:
        _maxes.append(val)
        _lists.append([val])

    self._len += 1
Add the element *val* to the list.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L113-L132
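A usage sketch (importing from the upstream sortedcontainers package; the bundled copy under pyFileFixity.lib works the same way):

from sortedcontainers import SortedList

sl = SortedList()
for val in (5, 1, 3):
    sl.add(val)      # each add keeps the list sorted
assert list(sl) == [1, 3, 5]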
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedList.remove
def remove(self, val):
    """
    Remove first occurrence of *val*.

    Raises ValueError if *val* is not present.
    """
    _maxes = self._maxes

    if not _maxes:
        raise ValueError('{0} not in list'.format(repr(val)))

    pos = bisect_left(_maxes, val)

    if pos == len(_maxes):
        raise ValueError('{0} not in list'.format(repr(val)))

    _lists = self._lists
    idx = bisect_left(_lists[pos], val)

    if _lists[pos][idx] == val:
        self._delete(pos, idx)
    else:
        raise ValueError('{0} not in list'.format(repr(val)))
python
def remove(self, val):
    """
    Remove first occurrence of *val*.

    Raises ValueError if *val* is not present.
    """
    _maxes = self._maxes

    if not _maxes:
        raise ValueError('{0} not in list'.format(repr(val)))

    pos = bisect_left(_maxes, val)

    if pos == len(_maxes):
        raise ValueError('{0} not in list'.format(repr(val)))

    _lists = self._lists
    idx = bisect_left(_lists[pos], val)

    if _lists[pos][idx] == val:
        self._delete(pos, idx)
    else:
        raise ValueError('{0} not in list'.format(repr(val)))
Remove first occurrence of *val*. Raises ValueError if *val* is not present.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L221-L242
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedList._delete
def _delete(self, pos, idx):
    """Delete the item at the given (pos, idx).

    Combines lists that are less than half the load level.

    Updates the index when the sublist length is more than half the load
    level. This requires decrementing the nodes in a traversal from the
    leaf node to the root. For an example traversal see self._loc.
    """
    _maxes, _lists, _index = self._maxes, self._lists, self._index
    lists_pos = _lists[pos]

    del lists_pos[idx]
    self._len -= 1

    len_lists_pos = len(lists_pos)

    if len_lists_pos > self._half:
        _maxes[pos] = lists_pos[-1]

        if _index:
            child = self._offset + pos
            while child > 0:
                _index[child] -= 1
                child = (child - 1) >> 1
            _index[0] -= 1
    elif len(_lists) > 1:
        if not pos:
            pos += 1

        prev = pos - 1
        _lists[prev].extend(_lists[pos])
        _maxes[prev] = _lists[prev][-1]

        del _maxes[pos]
        del _lists[pos]
        del _index[:]

        self._expand(prev)
    elif len_lists_pos:
        _maxes[pos] = lists_pos[-1]
    else:
        del _maxes[pos]
        del _lists[pos]
        del _index[:]
python
def _delete(self, pos, idx):
    """Delete the item at the given (pos, idx).

    Combines lists that are less than half the load level.

    Updates the index when the sublist length is more than half the load
    level. This requires decrementing the nodes in a traversal from the
    leaf node to the root. For an example traversal see self._loc.
    """
    _maxes, _lists, _index = self._maxes, self._lists, self._index
    lists_pos = _lists[pos]

    del lists_pos[idx]
    self._len -= 1

    len_lists_pos = len(lists_pos)

    if len_lists_pos > self._half:
        _maxes[pos] = lists_pos[-1]

        if _index:
            child = self._offset + pos
            while child > 0:
                _index[child] -= 1
                child = (child - 1) >> 1
            _index[0] -= 1
    elif len(_lists) > 1:
        if not pos:
            pos += 1

        prev = pos - 1
        _lists[prev].extend(_lists[pos])
        _maxes[prev] = _lists[prev][-1]

        del _maxes[pos]
        del _lists[pos]
        del _index[:]

        self._expand(prev)
    elif len_lists_pos:
        _maxes[pos] = lists_pos[-1]
    else:
        del _maxes[pos]
        del _lists[pos]
        del _index[:]
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L244-L296
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedList.extend
def extend(self, values):
    """
    Extend the list by appending all elements from the *values*. Raises a
    ValueError if the sort order would be violated.
    """
    _maxes, _lists, _load = self._maxes, self._lists, self._load

    if not isinstance(values, list):
        values = list(values)

    if any(values[pos - 1] > values[pos]
           for pos in range(1, len(values))):
        raise ValueError('given sequence not in sort order')

    offset = 0

    if _maxes:
        if values[0] < _lists[-1][-1]:
            msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
            raise ValueError(msg)

        if len(_lists[-1]) < self._half:
            _lists[-1].extend(values[:_load])
            _maxes[-1] = _lists[-1][-1]
            offset = _load

    len_lists = len(_lists)

    for idx in range(offset, len(values), _load):
        _lists.append(values[idx:(idx + _load)])
        _maxes.append(_lists[-1][-1])

    _index = self._index

    if len_lists == len(_lists):
        len_index = len(_index)
        if len_index > 0:
            len_values = len(values)
            child = len_index - 1
            while child:
                _index[child] += len_values
                child = (child - 1) >> 1
            _index[0] += len_values
    else:
        del _index[:]

    self._len += len(values)
python
def extend(self, values):
    """
    Extend the list by appending all elements from the *values*. Raises a
    ValueError if the sort order would be violated.
    """
    _maxes, _lists, _load = self._maxes, self._lists, self._load

    if not isinstance(values, list):
        values = list(values)

    if any(values[pos - 1] > values[pos]
           for pos in range(1, len(values))):
        raise ValueError('given sequence not in sort order')

    offset = 0

    if _maxes:
        if values[0] < _lists[-1][-1]:
            msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
            raise ValueError(msg)

        if len(_lists[-1]) < self._half:
            _lists[-1].extend(values[:_load])
            _maxes[-1] = _lists[-1][-1]
            offset = _load

    len_lists = len(_lists)

    for idx in range(offset, len(values), _load):
        _lists.append(values[idx:(idx + _load)])
        _maxes.append(_lists[-1][-1])

    _index = self._index

    if len_lists == len(_lists):
        len_index = len(_index)
        if len_index > 0:
            len_values = len(values)
            child = len_index - 1
            while child:
                _index[child] += len_values
                child = (child - 1) >> 1
            _index[0] += len_values
    else:
        del _index[:]

    self._len += len(values)
Extend the list by appending all elements from the *values*. Raises a ValueError if the sort order would be violated.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L1035-L1081
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedList.insert
def insert(self, idx, val):
    """
    Insert the element *val* into the list at *idx*. Raises a ValueError if
    the *val* at *idx* would violate the sort order.
    """
    _maxes, _lists, _len = self._maxes, self._lists, self._len

    if idx < 0:
        idx += _len
    if idx < 0:
        idx = 0
    if idx > _len:
        idx = _len

    if not _maxes:
        # The idx must be zero by the inequalities above.
        _maxes.append(val)
        _lists.append([val])
        self._len = 1
        return

    if not idx:
        if val > _lists[0][0]:
            msg = '{0} not in sort order at index {1}'.format(repr(val), 0)
            raise ValueError(msg)
        else:
            _lists[0].insert(0, val)
            self._expand(0)
            self._len += 1
            return

    if idx == _len:
        pos = len(_lists) - 1
        if _lists[pos][-1] > val:
            msg = '{0} not in sort order at index {1}'.format(repr(val), _len)
            raise ValueError(msg)
        else:
            _lists[pos].append(val)
            _maxes[pos] = _lists[pos][-1]
            self._expand(pos)
            self._len += 1
            return

    pos, idx = self._pos(idx)
    idx_before = idx - 1
    if idx_before < 0:
        pos_before = pos - 1
        idx_before = len(_lists[pos_before]) - 1
    else:
        pos_before = pos

    before = _lists[pos_before][idx_before]
    if before <= val <= _lists[pos][idx]:
        _lists[pos].insert(idx, val)
        self._expand(pos)
        self._len += 1
    else:
        msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
        raise ValueError(msg)
python
def insert(self, idx, val):
    """
    Insert the element *val* into the list at *idx*. Raises a ValueError if
    the *val* at *idx* would violate the sort order.
    """
    _maxes, _lists, _len = self._maxes, self._lists, self._len

    if idx < 0:
        idx += _len
    if idx < 0:
        idx = 0
    if idx > _len:
        idx = _len

    if not _maxes:
        # The idx must be zero by the inequalities above.
        _maxes.append(val)
        _lists.append([val])
        self._len = 1
        return

    if not idx:
        if val > _lists[0][0]:
            msg = '{0} not in sort order at index {1}'.format(repr(val), 0)
            raise ValueError(msg)
        else:
            _lists[0].insert(0, val)
            self._expand(0)
            self._len += 1
            return

    if idx == _len:
        pos = len(_lists) - 1
        if _lists[pos][-1] > val:
            msg = '{0} not in sort order at index {1}'.format(repr(val), _len)
            raise ValueError(msg)
        else:
            _lists[pos].append(val)
            _maxes[pos] = _lists[pos][-1]
            self._expand(pos)
            self._len += 1
            return

    pos, idx = self._pos(idx)
    idx_before = idx - 1
    if idx_before < 0:
        pos_before = pos - 1
        idx_before = len(_lists[pos_before]) - 1
    else:
        pos_before = pos

    before = _lists[pos_before][idx_before]
    if before <= val <= _lists[pos][idx]:
        _lists[pos].insert(idx, val)
        self._expand(pos)
        self._len += 1
    else:
        msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
        raise ValueError(msg)
Insert the element *val* into the list at *idx*. Raises a ValueError if the *val* at *idx* would violate the sort order.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L1083-L1141
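A sketch of insert's contract: the value must fit the sort order at the given index, otherwise ValueError is raised.

from sortedcontainers import SortedList

sl = SortedList([1, 3, 5])
sl.insert(1, 2)         # fine: 1 <= 2 <= 3 holds at index 1
try:
    sl.insert(0, 10)    # 10 before 1 violates the sort order
except ValueError as exc:
    print(exc)          # '10 not in sort order at index 0'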
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
SortedListWithKey.update
def update(self, iterable): """Update the list by adding all elements from *iterable*.""" _maxes, _lists, _keys = self._maxes, self._lists, self._keys values = sorted(iterable, key=self._key) if _maxes: if len(values) * 4 >= self._len: values.extend(chain.from_iterable(_lists)) values.sort(key=self._key) self._clear() else: _add = self.add for val in values: _add(val) return _load, _index = self._load, self._index _lists.extend(values[pos:(pos + _load)] for pos in range(0, len(values), _load)) _keys.extend(list(map(self._key, _list)) for _list in _lists) _maxes.extend(sublist[-1] for sublist in _keys) self._len = len(values) del _index[:]
python
def update(self, iterable):
    """Update the list by adding all elements from *iterable*."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    values = sorted(iterable, key=self._key)

    if _maxes:
        if len(values) * 4 >= self._len:
            values.extend(chain.from_iterable(_lists))
            values.sort(key=self._key)
            self._clear()
        else:
            _add = self.add
            for val in values:
                _add(val)
            return

    _load, _index = self._load, self._index

    _lists.extend(values[pos:(pos + _load)]
                  for pos in range(0, len(values), _load))
    _keys.extend(list(map(self._key, _list)) for _list in _lists)
    _maxes.extend(sublist[-1] for sublist in _keys)
    self._len = len(values)

    del _index[:]
Update the list by adding all elements from *iterable*.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L1517-L1539
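A short sketch of update() with a key function; the import path is again an assumption. Note the two paths in the code above: a large batch triggers a wholesale rebuild of the sublists, while a small one falls back to repeated add() calls:

from pyFileFixity.lib.sortedcontainers import SortedListWithKey

slk = SortedListWithKey([3, 1, 2], key=lambda x: -x)  # descending order
slk.update([5, 4])   # merged and re-sorted under the same key
print(list(slk))     # [5, 4, 3, 2, 1]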
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer.set_username_ibm
def set_username_ibm(self, username_ibm):
    """
    Parameters
    ----------
    username_ibm : str

    Raises
    ------
    Exception
        If mode is not `ibm`
    """
    if self.get_mode() == "ibm":
        self.__username_ibm = username_ibm
    else:
        raise Exception(
            "Mode is {}, whereas it must be `ibm`".format(
                self.get_mode()))
python
def set_username_ibm(self, username_ibm):
    """
    Parameters
    ----------
    username_ibm : str

    Raises
    ------
    Exception
        If mode is not `ibm`
    """
    if self.get_mode() == "ibm":
        self.__username_ibm = username_ibm
    else:
        raise Exception(
            "Mode is {}, whereas it must be `ibm`".format(
                self.get_mode()))
Parameters
----------
username_ibm : str

Raises
------
Exception
    If mode is not `ibm`
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L325-L341
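A hypothetical usage sketch of the setter; the constructor arguments shown are assumptions and may not match the real SimpleAudioIndexer signature:

from SimpleAudioIndexer import SimpleAudioIndexer

indexer = SimpleAudioIndexer(src_dir="/tmp/audio", mode="ibm")
indexer.set_username_ibm("my-ibm-username")  # succeeds only when mode is "ibm"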
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer.set_password_ibm
def set_password_ibm(self, password_ibm):
    """
    Parameters
    ----------
    password_ibm : str

    Raises
    ------
    Exception
        If mode is not `ibm`
    """
    if self.get_mode() == "ibm":
        self.__password_ibm = password_ibm
    else:
        raise Exception(
            "Mode is {}, whereas it must be `ibm`".format(self.get_mode()))
python
def set_password_ibm(self, password_ibm):
    """
    Parameters
    ----------
    password_ibm : str

    Raises
    ------
    Exception
        If mode is not `ibm`
    """
    if self.get_mode() == "ibm":
        self.__password_ibm = password_ibm
    else:
        raise Exception(
            "Mode is {}, whereas it must be `ibm`".format(self.get_mode()))
Parameters
----------
password_ibm : str

Raises
------
Exception
    If mode is not `ibm`
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L352-L367
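The failure path of the same guard, sketched with assumed constructor arguments ("cmu" here stands in for any non-ibm mode and is itself an assumption):

from SimpleAudioIndexer import SimpleAudioIndexer

indexer = SimpleAudioIndexer(src_dir="/tmp/audio", mode="cmu")
try:
    indexer.set_password_ibm("secret")
except Exception as exc:
    print(exc)  # Mode is cmu, whereas it must be `ibm`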
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._list_audio_files
def _list_audio_files(self, sub_dir=""):
    """
    Parameters
    ----------
    sub_dir : one of `needed_directories`, optional
        Default is "", which means it'll look through all of subdirs.

    Returns
    -------
    audio_files : [str]
        A list whose elements are basenames of the present audio files
        whose format is `wav`
    """
    audio_files = list()
    for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir,
                                                         sub_dir)):
        file_format = ''.join(possibly_audio_file.split('.')[-1])
        if file_format.lower() == "wav":
            audio_files.append(possibly_audio_file)
    return audio_files
python
def _list_audio_files(self, sub_dir=""):
    """
    Parameters
    ----------
    sub_dir : one of `needed_directories`, optional
        Default is "", which means it'll look through all of subdirs.

    Returns
    -------
    audio_files : [str]
        A list whose elements are basenames of the present audio files
        whose format is `wav`
    """
    audio_files = list()
    for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir,
                                                         sub_dir)):
        file_format = ''.join(possibly_audio_file.split('.')[-1])
        if file_format.lower() == "wav":
            audio_files.append(possibly_audio_file)
    return audio_files
Parameters
----------
sub_dir : one of `needed_directories`, optional
    Default is "", which means it'll look through all of subdirs.

Returns
-------
audio_files : [str]
    A list whose elements are basenames of the present audio files
    whose format is `wav`
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L418-L437
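The same extension filter, reproduced as a standalone sketch (the function name is hypothetical) so the behaviour can be tested without an indexer instance:

import os

def list_wav_basenames(directory):
    # Keep only entries whose final ".<ext>" suffix is "wav", any case.
    return [name for name in os.listdir(directory)
            if name.split('.')[-1].lower() == "wav"]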
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._get_audio_channels
def _get_audio_channels(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    channel_num : int
    """
    channel_num = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
             ).format(audio_abs_path, "Channels"),
            shell=True, universal_newlines=True).rstrip())
    return channel_num
python
def _get_audio_channels(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    channel_num : int
    """
    channel_num = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
             ).format(audio_abs_path, "Channels"),
            shell=True, universal_newlines=True).rstrip())
    return channel_num
Parameters
----------
audio_abs_path : str

Returns
-------
channel_num : int
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L439-L454
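An alternative sketch that reads the channel count with the standard library's wave module instead of shelling out to sox; it only works for WAV files, which is all this indexer handles:

import wave

def channels_via_wave(audio_abs_path):
    with wave.open(audio_abs_path, "rb") as wav_file:
        return wav_file.getnchannels()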
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._get_audio_sample_rate
def _get_audio_sample_rate(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    sample_rate : int
    """
    sample_rate = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
             ).format(audio_abs_path, "Sample Rate"),
            shell=True, universal_newlines=True).rstrip())
    return sample_rate
python
def _get_audio_sample_rate(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    sample_rate : int
    """
    sample_rate = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
             ).format(audio_abs_path, "Sample Rate"),
            shell=True, universal_newlines=True).rstrip())
    return sample_rate
Parameters
----------
audio_abs_path : str

Returns
-------
sample_rate : int
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L456-L471
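The same wave-module alternative, sketched for the sample rate:

import wave

def sample_rate_via_wave(audio_abs_path):
    with wave.open(audio_abs_path, "rb") as wav_file:
        return wav_file.getframerate()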
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._get_audio_sample_bit
def _get_audio_sample_bit(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    sample_bit : int
    """
    sample_bit = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
             """grep -oh "^[^-]*" """).format(audio_abs_path, "Precision"),
            shell=True, universal_newlines=True).rstrip())
    return sample_bit
python
def _get_audio_sample_bit(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    sample_bit : int
    """
    sample_bit = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
             """grep -oh "^[^-]*" """).format(audio_abs_path, "Precision"),
            shell=True, universal_newlines=True).rstrip())
    return sample_bit
Parameters
----------
audio_abs_path : str

Returns
-------
sample_bit : int
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L473-L488
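And a wave-module sketch for the bit depth; wave reports bytes per sample, so multiply by eight:

import wave

def sample_bits_via_wave(audio_abs_path):
    with wave.open(audio_abs_path, "rb") as wav_file:
        return wav_file.getsampwidth() * 8  # bytes per sample -> bits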
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._get_audio_duration_seconds
def _get_audio_duration_seconds(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    total_seconds : float
    """
    HHMMSS_duration = subprocess.check_output(
        ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
         """grep -oh "^[^=]*" """).format(
             audio_abs_path, "Duration"),
        shell=True, universal_newlines=True).rstrip()
    total_seconds = sum(
        [float(x) * 60 ** (2 - i)
         for i, x in enumerate(HHMMSS_duration.split(":"))])
    return total_seconds
python
def _get_audio_duration_seconds(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    total_seconds : float
    """
    HHMMSS_duration = subprocess.check_output(
        ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
         """grep -oh "^[^=]*" """).format(
             audio_abs_path, "Duration"),
        shell=True, universal_newlines=True).rstrip()
    total_seconds = sum(
        [float(x) * 60 ** (2 - i)
         for i, x in enumerate(HHMMSS_duration.split(":"))])
    return total_seconds
Parameters
----------
audio_abs_path : str

Returns
-------
total_seconds : float
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L490-L508
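The HH:MM:SS parsing at the heart of the method, as a standalone sketch with a worked check (the function name is hypothetical):

def hhmmss_to_seconds(hhmmss):
    # "01:02:03.50" -> 1*3600 + 2*60 + 3.5 = 3723.5
    return sum(float(part) * 60 ** (2 - i)
               for i, part in enumerate(hhmmss.split(":")))

assert hhmmss_to_seconds("01:02:03.50") == 3723.5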
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._get_audio_bit_rate
def _get_audio_bit_rate(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    bit_rate : int
    """
    bit_rate_formatted = subprocess.check_output(
        """sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'""".format(
            audio_abs_path, "Bit Rate"),
        shell=True, universal_newlines=True).rstrip()
    bit_rate = (lambda x: int(x[:-1]) * 10 ** 3 if x[-1].lower() == "k"
                else int(x[:-1]) * 10 ** 6 if x[-1].lower() == "m"
                else int(x[:-1]) * 10 ** 9 if x[-1].lower() == "g"
                else int(x))(bit_rate_formatted)
    return bit_rate
python
def _get_audio_bit_rate(self, audio_abs_path):
    """
    Parameters
    ----------
    audio_abs_path : str

    Returns
    -------
    bit_rate : int
    """
    bit_rate_formatted = subprocess.check_output(
        """sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'""".format(
            audio_abs_path, "Bit Rate"),
        shell=True, universal_newlines=True).rstrip()
    bit_rate = (lambda x: int(x[:-1]) * 10 ** 3 if x[-1].lower() == "k"
                else int(x[:-1]) * 10 ** 6 if x[-1].lower() == "m"
                else int(x[:-1]) * 10 ** 9 if x[-1].lower() == "g"
                else int(x))(bit_rate_formatted)
    return bit_rate
Parameters
----------
audio_abs_path : str

Returns
-------
bit_rate : int
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L510-L529
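The suffix handling packed into the lambda above, unrolled into a named sketch (the function name is hypothetical):

def parse_bit_rate(formatted):
    # Mirrors the lambda: "256k" -> 256000, "2M" -> 2000000; plain digit
    # strings pass through unchanged.
    suffixes = {"k": 10 ** 3, "m": 10 ** 6, "g": 10 ** 9}
    last = formatted[-1].lower()
    if last in suffixes:
        return int(formatted[:-1]) * suffixes[last]
    return int(formatted)

assert parse_bit_rate("256k") == 256000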
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._seconds_to_HHMMSS
def _seconds_to_HHMMSS(seconds):
    """
    Returns a string which is the hour, minute, second(milli)
    representation of the input `seconds`

    Parameters
    ----------
    seconds : float

    Returns
    -------
    str
        Has the form <int>H<int>M<int>S.<float>
    """
    less_than_second = seconds - floor(seconds)
    minutes, seconds = divmod(floor(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return "{}H{}M{}S.{}".format(hours, minutes, seconds,
                                 less_than_second)
python
def _seconds_to_HHMMSS(seconds):
    """
    Returns a string which is the hour, minute, second(milli)
    representation of the input `seconds`

    Parameters
    ----------
    seconds : float

    Returns
    -------
    str
        Has the form <int>H<int>M<int>S.<float>
    """
    less_than_second = seconds - floor(seconds)
    minutes, seconds = divmod(floor(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return "{}H{}M{}S.{}".format(hours, minutes, seconds,
                                 less_than_second)
Returns a string which is the hour, minute, second(milli) representation
of the input `seconds`

Parameters
----------
seconds : float

Returns
-------
str
    Has the form <int>H<int>M<int>S.<float>
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L531-L548
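A worked example of the conversion: 3723.5 seconds is 1 hour, 2 minutes, 3 seconds plus half a second (calling it through the class as a static method is an assumption about how it is decorated):

from SimpleAudioIndexer import SimpleAudioIndexer

print(SimpleAudioIndexer._seconds_to_HHMMSS(3723.5))  # 1H2M3S.0.5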
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer._audio_segment_extractor
def _audio_segment_extractor(self, audio_abs_path, segment_abs_path,
                             starting_second, duration):
    """
    Parameters
    ----------
    audio_abs_path : str
    segment_abs_path : str
    starting_second : int
    duration : int
    """
    subprocess.Popen(["sox", str(audio_abs_path), str(segment_abs_path),
                      "trim", str(starting_second), str(duration)],
                     universal_newlines=True).communicate()
python
def _audio_segment_extractor(self, audio_abs_path, segment_abs_path,
                             starting_second, duration):
    """
    Parameters
    ----------
    audio_abs_path : str
    segment_abs_path : str
    starting_second : int
    duration : int
    """
    subprocess.Popen(["sox", str(audio_abs_path), str(segment_abs_path),
                      "trim", str(starting_second), str(duration)],
                     universal_newlines=True).communicate()
Parameters
----------
audio_abs_path : str
segment_abs_path : str
starting_second : int
duration : int
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L550-L563
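The equivalent sox invocation spelled out; the file names are placeholders. This extracts a 10-second segment starting at second 5:

import subprocess

subprocess.Popen(
    ["sox", "input.wav", "segment.wav", "trim", "5", "10"],
    universal_newlines=True).communicate()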