repository_name: string (length 5 to 67)
func_path_in_repository: string (length 4 to 234)
func_name: string (length 0 to 314)
whole_func_string: string (length 52 to 3.87M)
language: string (6 classes)
func_code_string: string (length 52 to 3.87M)
func_documentation_string: string (length 1 to 47.2k)
func_code_url: string (length 85 to 339)
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/windows/advanced_config.py
ConfigPanel.chunk
python
def chunk(self, iterable, n, fillvalue=None):
    "itertools recipe: Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
itertools recipe: Collect data into fixed-length chunks or blocks
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/windows/advanced_config.py#L104-L108
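A standalone Python 3 sketch of the same itertools "grouper" recipe, for trying it outside the Gooey class above (the vendored code targets Python 2 and relies on izip_longest):

    from itertools import zip_longest

    def chunk(iterable, n, fillvalue=None):
        "Collect data into fixed-length chunks or blocks."
        args = [iter(iterable)] * n
        return zip_longest(*args, fillvalue=fillvalue)

    print([''.join(group) for group in chunk('ABCDEFG', 3, 'x')])
    # ['ABC', 'DEF', 'Gxx']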
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Positional.GetValue
python
def GetValue(self):
    '''
    Positionals have no associated options_string,
    so only the supplied arguments are returned.
    The order is assumed to be the same as the order
    of declaration in the client code

    Returns
        "argument_value"
    '''
    self.AssertInitialization('Positional')
    if str(self._widget.GetValue()) == EMPTY:
        return None
    return self._widget.GetValue()
Positionals have no associated options_string, so only the supplied arguments are returned. The order is assumed to be the same as the order of declaration in the client code Returns "argument_value"
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L265-L278
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Choice.GetValue
python
def GetValue(self):
    '''
    Returns
        "--option_name argument"
    '''
    self.AssertInitialization('Choice')
    if self._widget.GetValue() == self._DEFAULT_VALUE:
        return None
    return ' '.join(
        [self._action.option_strings[0] if self._action.option_strings else '',  # get the verbose copy if available
         self._widget.GetValue()])
Returns "--option_name argument"
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L291-L301
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Optional.GetValue
python
def GetValue(self):
    '''
    General options are key/value style pairs (conceptually).
    Thus the name of the option, as well as the argument to it
    are returned
    e.g.
        >>> myscript --outfile myfile.txt
    returns
        "--Option Value"
    '''
    self.AssertInitialization('Optional')
    value = self._widget.GetValue()
    if not value or len(value) <= 0:
        return None
    return ' '.join(
        [self._action.option_strings[0],  # get the verbose copy if available
         value])
General options are key/value style pairs (conceptually). Thus the name of the option, as well as the argument to it are returned e.g. >>> myscript --outfile myfile.txt returns "--Option Value"
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L323-L339
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Flag.GetValue
python
def GetValue(self):
    '''
    Flag options have no param associated with them.
    Thus we only need the name of the option.
    e.g
        >>> Python -v myscript
    returns
        Options name for argument (-v)
    '''
    if not self._widget.GetValue() or len(self._widget.GetValue()) <= 0:
        return None
    else:
        return self._action.option_strings[0]
Flag options have no param associated with them. Thus we only need the name of the option. e.g >>> Python -v myscript returns Options name for argument (-v)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L381-L393
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Flag.Update
python
def Update(self, size):
    '''
    Custom wrapper calculator to account for the
    increased size of the _msg widget after being
    inlined with the wx.CheckBox
    '''
    if self._msg is None:
        return
    help_msg = self._msg
    width, height = size
    content_area = int((width / 3) * .70)

    wiggle_room = range(int(content_area - content_area * .05),
                        int(content_area + content_area * .05))
    if help_msg.Size[0] not in wiggle_room:
        self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
        self._msg.Wrap(content_area)
Custom wrapper calculator to account for the increased size of the _msg widget after being inlined with the wx.CheckBox
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L395-L410
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/components.py
Counter.GetValue
python
def GetValue(self):
    '''
    NOTE: Added on plane. Cannot remember exact implementation
    of counter objects. I believe that they count sequentail
    pairings of options
    e.g.
        -vvvvv
    But I'm not sure. That's what I'm going with for now.

    Returns
        str(action.options_string[0]) * DropDown Value
    '''
    dropdown_value = self._widget.GetValue()
    if not str(dropdown_value).isdigit():
        return None
    arg = str(self._action.option_strings[0]).replace('-', '')
    repeated_args = arg * int(dropdown_value)
    return '-' + repeated_args
NOTE: Added on plane. Cannot remember exact implementation of counter objects. I believe that they count sequentail pairings of options e.g. -vvvvv But I'm not sure. That's what I'm going with for now. Returns str(action.options_string[0]) * DropDown Value
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L429-L446
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/lang/i18n.py
get_path
python
def get_path(language):
    ''' Returns the full path to the language file '''
    filename = language.lower() + '.json'
    lang_file_path = os.path.join(_DEFAULT_DIR, filename)
    if not os.path.exists(lang_file_path):
        raise IOError('Could not find {} language file'.format(language))
    return lang_file_path
Returns the full path to the language file
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/lang/i18n.py#L23-L29
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/lang/i18n.py
load
python
def load(filename):
    ''' Open and return the supplied json file '''
    global _DICTIONARY
    try:
        json_file = filename + '.json'
        with open(os.path.join(_DEFAULT_DIR, json_file), 'rb') as f:
            _DICTIONARY = json.load(f)
    except IOError:
        raise IOError('Language file not found. Make sure that your ',
                      'translation file is in the languages directory, ')
Open and return the supplied json file
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/lang/i18n.py#L32-L41
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py
safe_repr
python
def safe_repr(obj, clip=None):
    """
    Convert object to string representation, yielding the same result a
    `repr` but catches all exceptions and returns 'N/A' instead of raising
    the exception. Strings may be truncated by providing `clip`.

    >>> safe_repr(42)
    '42'
    >>> safe_repr('Clipped text', clip=8)
    'Clip..xt'
    >>> safe_repr([1,2,3,4], clip=8)
    '[1,2..4]'
    """
    try:
        s = repr(obj)
        if not clip or len(s) <= clip:
            return s
        else:
            return s[:clip-4] + '..' + s[-2:]
    except:
        return 'N/A'
Convert object to string representation, yielding the same result a `repr` but catches all exceptions and returns 'N/A' instead of raising the exception. Strings may be truncated by providing `clip`. >>> safe_repr(42) '42' >>> safe_repr('Clipped text', clip=8) 'Clip..xt' >>> safe_repr([1,2,3,4], clip=8) '[1,2..4]'
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py#L5-L25
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py
trunc
python
def trunc(obj, max, left=0):
    """
    Convert `obj` to string, eliminate newlines and truncate the string to
    `max` characters. If there are more characters in the string add ``...``
    to the string. With `left=True`, the string can be truncated at the
    beginning.

    @note: Does not catch exceptions when converting `obj` to string with
    `str`.

    >>> trunc('This is a long text.', 8)
    This ...
    >>> trunc('This is a long text.', 8, left)
    ...text.
    """
    s = str(obj)
    s = s.replace('\n', '|')
    if len(s) > max:
        if left:
            return '...' + s[len(s)-max+3:]
        else:
            return s[:(max-3)] + '...'
    else:
        return s
Convert `obj` to string, eliminate newlines and truncate the string to `max` characters. If there are more characters in the string add ``...`` to the string. With `left=True`, the string can be truncated at the beginning. @note: Does not catch exceptions when converting `obj` to string with `str`. >>> trunc('This is a long text.', 8) This ... >>> trunc('This is a long text.', 8, left) ...text.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py#L28-L49
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py
pp
python
def pp(i, base=1024):
    """
    Pretty-print the integer `i` as a human-readable size representation.
    """
    degree = 0
    pattern = "%4d %s"
    while i > base:
        pattern = "%7.2f %s"
        i = i / float(base)
        degree += 1
    scales = ['B', 'KB', 'MB', 'GB', 'TB', 'EB']
    return pattern % (i, scales[degree])
Pretty-print the integer `i` as a human-readable size representation.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py#L51-L62
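A minimal usage sketch of the pp() helper above; it assumes the function is importable from pympler.util.stringutils (where this vendored copy originates), and the exact column padding depends on the format patterns:

    from pympler.util.stringutils import pp

    print(pp(512))      # '512 B' (values at or below the base keep the integer pattern)
    print(pp(2000000))  # roughly '1.91 MB'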
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py
pp_timestamp
python
def pp_timestamp(t):
    """
    Get a friendly timestamp represented as a string.
    """
    if t is None:
        return ''
    h, m, s = int(t / 3600), int(t / 60 % 60), t % 60
    return "%02d:%02d:%05.2f" % (h, m, s)
Get a friendly timestamp represented as a string.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/stringutils.py#L64-L71
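Under the same import assumption, a quick worked example for pp_timestamp(): 3725.5 seconds is 1 hour, 2 minutes and 5.5 seconds.

    from pympler.util.stringutils import pp_timestamp

    print(pp_timestamp(3725.5))  # '01:02:05.50'
    print(pp_timestamp(None))    # '' (None means "no timestamp")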
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/garbagegraph.py
GarbageGraph.print_stats
python
def print_stats(self, stream=None):
    """
    Log annotated garbage objects to console or file.

    :param stream: open file, uses sys.stdout if not given
    """
    if not stream:  # pragma: no cover
        stream = sys.stdout
    self.metadata.sort(key=lambda x: -x.size)
    stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
    for g in self.metadata:
        stream.write('0x%08x %8d %-12s %-46s\n' %
                     (g.id, g.size, trunc(g.type, 12), trunc(g.str, 46)))
    stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' %
                 (self.count, self.num_in_cycles, pp(self.total_size)))
Log annotated garbage objects to console or file. :param stream: open file, uses sys.stdout if not given
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/garbagegraph.py#L47-L61
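A hedged sketch of how print_stats() might be driven. The no-argument GarbageGraph() constructor is an assumption (it is not shown in this record), and it presumes the pympler garbagegraph module is importable:

    import gc
    import sys
    from pympler.garbagegraph import GarbageGraph

    gc.collect()
    gb = GarbageGraph()         # assumed constructor: snapshot of current gc garbage
    gb.print_stats(sys.stdout)  # one row per object: id, size, type, representation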
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pprofile/pprofile.py
ProfileBase.getFilenameSet
python
def getFilenameSet(self):
    """
    Returns a set of profiled file names.

    Note: "file name" is used loosely here. See python documentation for
    co_filename, linecache module and PEP302. It may not be a valid
    filesystem path.
    """
    result = set(self.file_dict)
    # Ignore profiling code. __file__ does not always provide consistent
    # results with f_code.co_filename (ex: easy_install with zipped egg),
    # so inspect current frame instead.
    # XXX: assumes all of pprofile code resides in a single file.
    result.discard(inspect.currentframe().f_code.co_filename)
    return result
Returns a set of profiled file names. Note: "file name" is used loosely here. See python documentation for co_filename, linecache module and PEP302. It may not be a valid filesystem path.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pprofile/pprofile.py#L165-L179
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pprofile/pprofile.py
ProfileBase.callgrind
python
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
    """
    Dump statistics in callgrind format.
    Contains:
    - per-line hit count, time and time-per-hit
    - call associations (call tree)
      Note: hit count is not inclusive, in that it is not the sum of all
      hits inside that call.
    Time unit: microsecond (1e-6 second).

    out (file-ish opened for writing)
        Destination of callgrind profiling data.
    filename (str, list of str)
        If provided, dump stats for given source file(s) only.
        By default, list for all known files.
    commandline (anything with __str__)
        If provided, will be output as the command line used to generate
        this profiling data.
    relative_path (bool)
        When True, absolute elements are stripped from path. Useful when
        maintaining several copies of source trees with their own profiling
        result, so kcachegrind does not look in system-wide files which may
        not match with profiled code.
    """
    print >> out, 'version: 1'
    if commandline is not None:
        print >> out, 'cmd:', commandline
    print >> out, 'creator: pprofile'
    print >> out, 'event: usphit :us/hit'
    print >> out, 'events: hits us usphit'
    file_dict = self.file_dict
    if relative_path:
        convertPath = _relpath
    else:
        convertPath = lambda x: x
    for name in self._getFileNameList(filename):
        printable_name = convertPath(name)
        print >> out, 'fl=%s' % printable_name
        funcname = False
        call_list_by_line = file_dict[name].getCallListByLine()
        for lineno, func, firstlineno, hits, duration, _ in self._iterFile(
                name, call_list_by_line):
            call_list = call_list_by_line.get(lineno, ())
            if not hits and not call_list:
                continue
            if func is None:
                func, firstlineno = call_list[0][:2]
            if funcname != func:
                funcname = func
                print >> out, 'fn=%s' % _getFuncOrFile(func, printable_name, firstlineno)
            ticks = int(duration * 1000000)
            if hits == 0:
                ticksperhit = 0
            else:
                ticksperhit = ticks / hits
            print >> out, lineno, hits, ticks, int(ticksperhit)
            for _, _, hits, duration, callee_file, callee_line, \
                    callee_name in sorted(call_list, key=lambda x: x[2:4]):
                callee_file = convertPath(callee_file)
                print >> out, 'cfl=%s' % callee_file
                print >> out, 'cfn=%s' % _getFuncOrFile(callee_name, callee_file, callee_line)
                print >> out, 'calls=%s' % hits, callee_line
                duration *= 1000000
                print >> out, lineno, hits, int(duration), int(duration / hits)
Dump statistics in callgrind format. Contains: - per-line hit count, time and time-per-hit - call associations (call tree) Note: hit count is not inclusive, in that it is not the sum of all hits inside that call. Time unit: microsecond (1e-6 second). out (file-ish opened for writing) Destination of callgrind profiling data. filename (str, list of str) If provided, dump stats for given source file(s) only. By default, list for all known files. commandline (anything with __str__) If provided, will be output as the command line used to generate this profiling data. relative_path (bool) When True, absolute elements are stripped from path. Useful when maintaining several copies of source trees with their own profiling result, so kcachegrind does not look in system-wide files which may not match with profiled code.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pprofile/pprofile.py#L220-L284
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pprofile/pprofile.py
ProfileBase.annotate
python
def annotate(self, out, filename=None, commandline=None, relative_path=False):
    """
    Dump annotated source code with current profiling statistics to "out"
    file.
    Time unit: second.

    out (file-ish opened for writing)
        Destination of annotated sources.
    filename (str, list of str)
        If provided, dump stats for given source file(s) only.
        By default, list for all known files.
    commandline (anything with __str__)
        If provided, will be output as the command line used to generate
        this annotation.
    relative_path (bool)
        For compatibility with callgrind. Ignored.
    """
    file_dict = self.file_dict
    total_time = self.total_time
    if commandline is not None:
        print >> out, 'Command line:', commandline
    print >> out, 'Total duration: %gs' % total_time
    if not total_time:
        return
    def percent(value, scale):
        if scale == 0:
            return 0
        return value * 100 / float(scale)
    for name in self._getFileNameList(filename):
        file_timing = file_dict[name]
        file_total_time = file_timing.getTotalTime()
        call_list_by_line = file_timing.getCallListByLine()
        print >> out, 'File:', name
        print >> out, 'File duration: %gs (%.2f%%)' % (file_total_time,
            percent(file_total_time, total_time))
        print >> out, _ANNOTATE_HEADER
        print >> out, _ANNOTATE_HORIZONTAL_LINE
        for lineno, _, _, hits, duration, line in self._iterFile(name,
                call_list_by_line):
            if hits:
                time_per_hit = duration / hits
            else:
                time_per_hit = 0
            print >> out, _ANNOTATE_FORMAT % {
                'lineno': lineno,
                'hits': hits,
                'time': duration,
                'time_per_hit': time_per_hit,
                'percent': percent(duration, total_time),
                'line': line,
            },
            for _, _, hits, duration, callee_file, callee_line, \
                    callee_name in call_list_by_line.get(lineno, ()):
                print >> out, _ANNOTATE_CALL_FORMAT % {
                    'hits': hits,
                    'time': duration,
                    'time_per_hit': duration / hits,
                    'percent': percent(duration, total_time),
                    'callee_file': callee_file,
                    'callee_line': callee_line,
                    'callee_name': callee_name,
                }
Dump annotated source code with current profiling statistics to "out" file. Time unit: second. out (file-ish opened for writing) Destination of annotated sources. filename (str, list of str) If provided, dump stats for given source file(s) only. By default, list for all known files. commandline (anything with __str__) If provided, will be output as the command line used to generate this annotation. relative_path (bool) For compatibility with callgrind. Ignored.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pprofile/pprofile.py#L286-L346
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pprofile/pprofile.py
Profile.disable
python
def disable(self, threads=True):
    """
    Disable profiling.
    """
    if self.enabled_start:
        sys.settrace(None)
        self._disable()
    else:
        warn('Duplicate "disable" call')
Disable profiling.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pprofile/pprofile.py#L477-L485
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/pprofile/pprofile.py
Profile.run
python
def run(self, cmd):
    """Similar to profile.Profile.run ."""
    import __main__
    dict = __main__.__dict__
    return self.runctx(cmd, dict, dict)
Similar to profile.Profile.run .
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/pprofile/pprofile.py#L553-L557
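A hedged sketch pulling together the pprofile methods shown above (run, annotate and callgrind). It assumes a Python-3-compatible pprofile module is importable; the vendored copy above still uses the Python 2 print statement:

    import sys
    import pprofile

    def busy():
        return sum(i * i for i in range(100000))

    prof = pprofile.Profile()
    prof.run('busy()')         # runs the command in __main__'s namespace under the tracer
    prof.annotate(sys.stdout)  # per-line hit counts and timings as plain text
    with open('cachegrind.out.demo', 'w') as out:
        prof.callgrind(out)    # dump loadable by KCachegrind/QCacheGrind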
lrq3000/pyFileFixity
pyFileFixity/lib/tee.py
Tee.write
python
def write(self, data, end="\n", flush=True):
    """ Output data to stdout and/or file """
    if not self.nostdout:
        self.stdout.write(data+end)
    if self.file is not None:
        self.file.write(data+end)
    if flush:
        self.flush()
Output data to stdout and/or file
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/tee.py#L27-L34
lrq3000/pyFileFixity
pyFileFixity/lib/tee.py
Tee.flush
python
def flush(self):
    """ Force commit changes to the file and stdout """
    if not self.nostdout:
        self.stdout.flush()
    if self.file is not None:
        self.file.flush()
Force commit changes to the file and stdout
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/tee.py#L36-L41
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_levenshtein.py
levenshtein
python
def levenshtein(seq1, seq2, normalized=False, max_dist=-1):
    """Compute the absolute Levenshtein distance between the two sequences
    `seq1` and `seq2`.

    The Levenshtein distance is the minimum number of edit operations necessary
    for transforming one sequence into the other. The edit operations allowed
    are:

        * deletion:     ABC -> BC, AC, AB
        * insertion:    ABC -> ABCD, EABC, AEBC..
        * substitution: ABC -> ABE, ADC, FBC..

    The `max_dist` parameter controls at which moment we should stop computing
    the distance between the provided sequences. If it is a negative integer,
    the distance will be computed until the sequences are exhausted; otherwise,
    the computation will stop at the moment the calculated distance is higher
    than `max_dist`, and then return -1. For example:

        >>> levenshtein("abc", "abcd", max_dist=1)  # dist = 1
        1
        >>> levenshtein("abc", "abcde", max_dist=1) # dist = 2
        -1

    This can be a time saver if you're not interested in the exact distance, but
    only need to check if the distance between the given sequences is below a
    given threshold.

    The `normalized` parameter is here for backward compatibility; providing
    it will result in a call to `nlevenshtein`, which should be used directly
    instead.
    """
    if normalized:
        return nlevenshtein(seq1, seq2, method=1)

    if seq1 == seq2:
        return 0

    len1, len2 = len(seq1), len(seq2)
    if max_dist >= 0 and abs(len1 - len2) > max_dist:
        return -1
    if len1 == 0:
        return len2
    if len2 == 0:
        return len1
    if len1 < len2:
        len1, len2 = len2, len1
        seq1, seq2 = seq2, seq1

    column = array('L', range(len2 + 1))

    for x in range(1, len1 + 1):
        column[0] = x
        last = x - 1
        for y in range(1, len2 + 1):
            old = column[y]
            cost = int(seq1[x - 1] != seq2[y - 1])
            column[y] = min(column[y] + 1, column[y - 1] + 1, last + cost)
            last = old
        if max_dist >= 0 and min(column) > max_dist:
            return -1

    if max_dist >= 0 and column[len2] > max_dist:
        # stay consistent, even if we have the exact distance
        return -1
    return column[len2]
Compute the absolute Levenshtein distance between the two sequences `seq1` and `seq2`. The Levenshtein distance is the minimum number of edit operations necessary for transforming one sequence into the other. The edit operations allowed are: * deletion: ABC -> BC, AC, AB * insertion: ABC -> ABCD, EABC, AEBC.. * substitution: ABC -> ABE, ADC, FBC.. The `max_dist` parameter controls at which moment we should stop computing the distance between the provided sequences. If it is a negative integer, the distance will be computed until the sequences are exhausted; otherwise, the computation will stop at the moment the calculated distance is higher than `max_dist`, and then return -1. For example: >>> levenshtein("abc", "abcd", max_dist=1) # dist = 1 1 >>> levenshtein("abc", "abcde", max_dist=1) # dist = 2 -1 This can be a time saver if you're not interested in the exact distance, but only need to check if the distance between the given sequences is below a given threshold. The `normalized` parameter is here for backward compatibility; providing it will result in a call to `nlevenshtein`, which should be used directly instead.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_levenshtein.py#L6-L69
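A minimal usage sketch, assuming the vendored package above is importable as distance (the same API is published on PyPI under that name):

    from distance import levenshtein

    print(levenshtein("kitten", "sitting"))         # 3
    print(levenshtein("abc", "abcd", max_dist=1))   # 1
    print(levenshtein("abc", "abcde", max_dist=1))  # -1: distance exceeds max_dist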
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_levenshtein.py
nlevenshtein
python
def nlevenshtein(seq1, seq2, method=1):
    """Compute the normalized Levenshtein distance between `seq1` and `seq2`.

    Two normalization methods are provided. For both of them, the normalized
    distance will be a float between 0 and 1, where 0 means equal and 1
    completely different. The computation obeys the following patterns:

        0.0                     if seq1 == seq2
        1.0                     if len(seq1) == 0 or len(seq2) == 0
        edit distance / factor  otherwise

    The `method` parameter specifies which normalization factor should be used.
    It can have the value 1 or 2, which correspond to the following:

        1: the length of the shortest alignment between the sequences
           (that is, the length of the longest sequence)
        2: the length of the longest alignment between the sequences

    Which normalization factor should be chosen is a matter of taste. The first
    one is cheap to compute. The second one is more costly, but it accounts
    better than the first one for parallelisms of symbols between the sequences.

    For the rationale behind the use of the second method, see:
    Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein
    Distance", 2004, p. 130 sq, which is available online at:
    http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
    """
    if seq1 == seq2:
        return 0.0
    len1, len2 = len(seq1), len(seq2)
    if len1 == 0 or len2 == 0:
        return 1.0
    if len1 < len2:  # minimize the arrays size
        len1, len2 = len2, len1
        seq1, seq2 = seq2, seq1

    if method == 1:
        return levenshtein(seq1, seq2) / float(len1)
    if method != 2:
        raise ValueError("expected either 1 or 2 for `method` parameter")

    column = array('L', range(len2 + 1))
    length = array('L', range(len2 + 1))

    for x in range(1, len1 + 1):
        column[0] = length[0] = x
        last = llast = x - 1
        for y in range(1, len2 + 1):
            # dist
            old = column[y]
            ic = column[y - 1] + 1
            dc = column[y] + 1
            rc = last + (seq1[x - 1] != seq2[y - 1])
            column[y] = min(ic, dc, rc)
            last = old

            # length
            lold = length[y]
            lic = length[y - 1] + 1 if ic == column[y] else 0
            ldc = length[y] + 1 if dc == column[y] else 0
            lrc = llast + 1 if rc == column[y] else 0
            length[y] = max(ldc, lic, lrc)
            llast = lold

    return column[y] / float(length[y])
Compute the normalized Levenshtein distance between `seq1` and `seq2`. Two normalization methods are provided. For both of them, the normalized distance will be a float between 0 and 1, where 0 means equal and 1 completely different. The computation obeys the following patterns: 0.0 if seq1 == seq2 1.0 if len(seq1) == 0 or len(seq2) == 0 edit distance / factor otherwise The `method` parameter specifies which normalization factor should be used. It can have the value 1 or 2, which correspond to the following: 1: the length of the shortest alignment between the sequences (that is, the length of the longest sequence) 2: the length of the longest alignment between the sequences Which normalization factor should be chosen is a matter of taste. The first one is cheap to compute. The second one is more costly, but it accounts better than the first one for parallelisms of symbols between the sequences. For the rationale behind the use of the second method, see: Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein Distance", 2004, p. 130 sq, which is available online at: http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_levenshtein.py#L72-L140
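Under the same import assumption, a worked example of the two normalization methods. For "abc" versus "abcde" the edit distance is 2 and the longest sequence has length 5, so method 1 yields 2/5 = 0.4; method 2 divides by the alignment length, which is at least as long, so its result can never exceed method 1's:

    from distance import nlevenshtein

    print(nlevenshtein("abc", "abcde", method=1))  # 0.4
    print(nlevenshtein("abc", "abcde", method=2))  # <= 0.4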
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/pstatsadapter.py
PStatsAdapter.parents
python
def parents(self, node):
    """Determine all parents of node in our tree"""
    return [
        parent for parent in getattr( node, 'parents', [] )
        if getattr(parent, 'tree', self.TREE) == self.TREE
    ]
Determine all parents of node in our tree
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/pstatsadapter.py#L37-L43
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/pstatsadapter.py
PStatsAdapter.filename
python
def filename( self, node ):
    """Extension to squaremap api to provide "what file is this" information"""
    if not node.directory:
        # TODO: any cases other than built-ins?
        return None
    if node.filename == '~':
        # TODO: look up C/Cython/whatever source???
        return None
    return os.path.join(node.directory, node.filename)
Extension to squaremap api to provide "what file is this" information
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/pstatsadapter.py#L65-L73
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
get_ref
python
def get_ref(obj):
    """
    Get string reference to object. Stores a weak reference in a dictionary
    using the object's id as the key. If the object cannot be weakly
    referenced (e.g. dictionaries, frame objects), store a strong references
    in a classic dictionary.

    Returns the object's id as a string.
    """
    oid = id(obj)
    try:
        server.id2ref[oid] = obj
    except TypeError:
        server.id2obj[oid] = obj
    return str(oid)
Get string reference to object. Stores a weak reference in a dictionary using the object's id as the key. If the object cannot be weakly referenced (e.g. dictionaries, frame objects), store a strong references in a classic dictionary. Returns the object's id as a string.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L69-L83
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
get_obj
python
def get_obj(ref):
    """Get object from string reference."""
    oid = int(ref)
    return server.id2ref.get(oid) or server.id2obj[oid]
Get object from string reference.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L86-L89
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
process
python
def process():
    """Get process overview."""
    pmi = ProcessMemoryInfo()
    threads = get_current_threads()
    return dict(info=pmi, threads=threads)
Get process overview.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L107-L111
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
tracker_index
python
def tracker_index():
    """Get tracker overview."""
    stats = server.stats
    if stats and stats.snapshots:
        stats.annotate()
        timeseries = []
        for cls in stats.tracked_classes:
            series = []
            for snapshot in stats.snapshots:
                series.append(snapshot.classes.get(cls, {}).get('sum', 0))
            timeseries.append((cls, series))

        series = [s.overhead for s in stats.snapshots]
        timeseries.append(("Profiling overhead", series))

        if stats.snapshots[0].system_total.data_segment:
            # Assume tracked data resides in the data segment
            series = [s.system_total.data_segment - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Data segment", series))
            series = [s.system_total.code_segment for s in stats.snapshots]
            timeseries.append(("Code segment", series))
            series = [s.system_total.stack_segment for s in stats.snapshots]
            timeseries.append(("Stack segment", series))
            series = [s.system_total.shared_segment for s in stats.snapshots]
            timeseries.append(("Shared memory", series))
        else:
            series = [s.total - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Other", series))
        return dict(snapshots=stats.snapshots, timeseries=timeseries)
    else:
        return dict(snapshots=[])
Get tracker overview.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L116-L148
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
tracker_class
python
def tracker_class(clsname):
    """Get class instance details."""
    stats = server.stats
    if not stats:
        bottle.redirect('/tracker')
    stats.annotate()
    return dict(stats=stats, clsname=clsname)
Get class instance details.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L153-L159
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
garbage_cycle
python
def garbage_cycle(index):
    """Get reference cycle details."""
    graph = _compute_garbage_graphs()[int(index)]
    graph.reduce_to_cycles()
    objects = graph.metadata
    objects.sort(key=lambda x: -x.size)
    return dict(objects=objects, index=index)
Get reference cycle details.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L224-L230
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
_get_graph
python
def _get_graph(graph, filename):
    """Retrieve or render a graph."""
    try:
        rendered = graph.rendered_file
    except AttributeError:
        try:
            graph.render(os.path.join(server.tmpdir, filename), format='png')
            rendered = filename
        except OSError:
            rendered = None
        graph.rendered_file = rendered
    return rendered
Retrieve or render a graph.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L233-L244
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
garbage_graph
python
def garbage_graph(index):
    """Get graph representation of reference cycle."""
    graph = _compute_garbage_graphs()[int(index)]
    reduce_graph = bottle.request.GET.get('reduce', '')
    if reduce_graph:
        graph = graph.reduce_to_cycles()
    if not graph:
        return None
    filename = 'garbage%so%s.png' % (index, reduce_graph)
    rendered_file = _get_graph(graph, filename)
    if rendered_file:
        bottle.send_file(rendered_file, root=server.tmpdir)
    else:
        return None
Get graph representation of reference cycle.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L248-L261
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
start_profiler
python
def start_profiler(host='localhost', port=8090, tracker=None, stats=None,
                   debug=False, **kwargs):
    """
    Start the web server to show profiling data. The function suspends the
    Python application (the current thread) until the web server is stopped.

    The only way to stop the server is to signal the running thread, e.g. press
    Ctrl+C in the console. If this isn't feasible for your application use
    `start_in_background` instead.

    During the execution of the web server, profiling data is (lazily) cached
    to improve performance. For example, garbage graphs are rendered when the
    garbage profiling data is requested and are simply retransmitted upon
    later requests.

    :param host: the host where the server shall run, default is localhost
    :param port: server listens on the specified port, default is 8090 to
        allow coexistance with common web applications
    :param tracker: `ClassTracker` instance, browse profiling data (on-line
        analysis)
    :param stats: `Stats` instance, analyze `ClassTracker` profiling dumps
        (useful for off-line analysis)
    """
    if tracker and not stats:
        server.stats = tracker.stats
    else:
        server.stats = stats
    try:
        server.tmpdir = mkdtemp(prefix='pympler')
        server.server = PymplerServer(host=host, port=port, **kwargs)
        bottle.debug(debug)
        bottle.run(server=server.server)
    finally:
        rmtree(server.tmpdir)
Start the web server to show profiling data. The function suspends the Python application (the current thread) until the web server is stopped. The only way to stop the server is to signal the running thread, e.g. press Ctrl+C in the console. If this isn't feasible for your application use `start_in_background` instead. During the execution of the web server, profiling data is (lazily) cached to improve performance. For example, garbage graphs are rendered when the garbage profiling data is requested and are simply retransmitted upon later requests. :param host: the host where the server shall run, default is localhost :param port: server listens on the specified port, default is 8090 to allow coexistance with common web applications :param tracker: `ClassTracker` instance, browse profiling data (on-line analysis) :param stats: `Stats` instance, analyze `ClassTracker` profiling dumps (useful for off-line analysis)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L277-L310
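A hedged sketch of start_profiler() driven by a ClassTracker. It assumes pympler's classtracker and the web module shown above are importable; the Document class is just a placeholder for whatever the application tracks:

    from pympler.classtracker import ClassTracker
    from pympler import web

    class Document(object):
        pass

    tracker = ClassTracker()
    tracker.track_class(Document)
    docs = [Document() for _ in range(100)]
    tracker.create_snapshot()

    # Blocks the current thread; stop with Ctrl+C, then browse http://localhost:8090/
    web.start_profiler(tracker=tracker)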
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py
_winreg_getShellFolder
python
def _winreg_getShellFolder( name ):
    """Get a shell folder by string name from the registry"""
    k = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    try:
        # should check that it's valid? How?
        return _winreg.QueryValueEx( k, name )[0]
    finally:
        _winreg.CloseKey( k )
Get a shell folder by string name from the registry
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py#L18-L28
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py
appdatadirectory
python
def appdatadirectory( ): """Attempt to retrieve the current user's app-data directory This is the location where application-specific files should be stored. On *nix systems, this will be the ${HOME}/.config directory. On Win32 systems, it will be the "Application Data" directory. Note that for Win32 systems it is normal to create a sub-directory for storing data in the Application Data directory. """ if shell: # on Win32 and have Win32all extensions, best-case return shell_getShellFolder(shellcon.CSIDL_APPDATA) if _winreg: # on Win32, but no Win32 shell com available, this uses # a direct registry access, likely to fail on Win98/Me return _winreg_getShellFolder( 'AppData' ) # okay, what if for some reason _winreg is missing? would we want to allow ctypes? ## default case, look for name in environ... for name in ['APPDATA', 'HOME']: if name in os.environ: return os.path.join( os.environ[name], '.config' ) # well, someone's being naughty, see if we can get ~ to expand to a directory... possible = os.path.abspath(os.path.expanduser( '~/.config' )) if os.path.exists( possible ): return possible raise OSError( """Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""" )
Attempt to retrieve the current user's app-data directory This is the location where application-specific files should be stored. On *nix systems, this will be the ${HOME}/.config directory. On Win32 systems, it will be the "Application Data" directory. Note that for Win32 systems it is normal to create a sub-directory for storing data in the Application Data directory.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py#L40-L66
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
get_objects
def get_objects(remove_dups=True, include_frames=False): """Return a list of all known objects excluding frame objects. If (outer) frame objects shall be included, pass `include_frames=True`. In order to prevent building reference cycles, the current frame object (of the caller of get_objects) is ignored. This will not prevent creating reference cycles if the object list is passed up the call-stack. Therefore, frame objects are not included by default. Keyword arguments: remove_dups -- if True, all duplicate objects will be removed. include_frames -- if True, includes frame objects. """ gc.collect() # Do not initialize local variables before calling gc.get_objects or those # will be included in the list. Furthermore, ignore frame objects to # prevent reference cycles. tmp = gc.get_objects() tmp = [o for o in tmp if not isframe(o)] res = [] for o in tmp: # gc.get_objects returns only container objects, but we also want # the objects referenced by them refs = get_referents(o) for ref in refs: if not _is_containerobject(ref): # we already got the container objects, now we only add # non-container objects res.append(ref) res.extend(tmp) if remove_dups: res = _remove_duplicates(res) if include_frames: for sf in stack()[2:]: res.append(sf[0]) return res
python
def get_objects(remove_dups=True, include_frames=False): """Return a list of all known objects excluding frame objects. If (outer) frame objects shall be included, pass `include_frames=True`. In order to prevent building reference cycles, the current frame object (of the caller of get_objects) is ignored. This will not prevent creating reference cycles if the object list is passed up the call-stack. Therefore, frame objects are not included by default. Keyword arguments: remove_dups -- if True, all duplicate objects will be removed. include_frames -- if True, includes frame objects. """ gc.collect() # Do not initialize local variables before calling gc.get_objects or those # will be included in the list. Furthermore, ignore frame objects to # prevent reference cycles. tmp = gc.get_objects() tmp = [o for o in tmp if not isframe(o)] res = [] for o in tmp: # gc.get_objects returns only container objects, but we also want # the objects referenced by them refs = get_referents(o) for ref in refs: if not _is_containerobject(ref): # we already got the container objects, now we only add # non-container objects res.append(ref) res.extend(tmp) if remove_dups: res = _remove_duplicates(res) if include_frames: for sf in stack()[2:]: res.append(sf[0]) return res
Return a list of all known objects excluding frame objects. If (outer) frame objects shall be included, pass `include_frames=True`. In order to prevent building reference cycles, the current frame object (of the caller of get_objects) is ignored. This will not prevent creating reference cycles if the object list is passed up the call-stack. Therefore, frame objects are not included by default. Keyword arguments: remove_dups -- if True, all duplicate objects will be removed. include_frames -- if True, includes frame objects.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L17-L55
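A short usage sketch for get_objects, assuming pympler is importable; counts will vary per interpreter session:
from pympler import muppy

all_objects = muppy.get_objects()   # every reachable non-frame object
print(len(all_objects))             # rough count of live objects
strings = [o for o in all_objects if isinstance(o, str)]
print(len(strings))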
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
get_size
def get_size(objects):
    """Compute the total size of all elements in objects."""
    res = 0
    for o in objects:
        try:
            res += _getsizeof(o)
        except AttributeError:
            print("IGNORING: type=%s; o=%s" % (str(type(o)), str(o)))
    return res
python
def get_size(objects):
    """Compute the total size of all elements in objects."""
    res = 0
    for o in objects:
        try:
            res += _getsizeof(o)
        except AttributeError:
            print("IGNORING: type=%s; o=%s" % (str(type(o)), str(o)))
    return res
Compute the total size of all elements in objects.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L57-L65
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
get_diff
def get_diff(left, right): """Get the difference of both lists. The result will be a dict with this form {'+': [], '-': []}. Items listed in '+' exist only in the right list, items listed in '-' exist only in the left list. """ res = {'+': [], '-': []} def partition(objects): """Partition the passed object list.""" res = {} for o in objects: t = type(o) if type(o) not in res: res[t] = [] res[t].append(o) return res def get_not_included(foo, bar): """Compare objects from foo with objects defined in the values of bar (set of partitions). Returns a list of all objects included in list, but not dict values. """ res = [] for o in foo: if not compat.object_in_list(type(o), bar): res.append(o) elif not compat.object_in_list(o, bar[type(o)]): res.append(o) return res # Create partitions of both lists. This will reduce the time required for # the comparison left_objects = partition(left) right_objects = partition(right) # and then do the diff res['+'] = get_not_included(right, left_objects) res['-'] = get_not_included(left, right_objects) return res
python
def get_diff(left, right): """Get the difference of both lists. The result will be a dict with this form {'+': [], '-': []}. Items listed in '+' exist only in the right list, items listed in '-' exist only in the left list. """ res = {'+': [], '-': []} def partition(objects): """Partition the passed object list.""" res = {} for o in objects: t = type(o) if type(o) not in res: res[t] = [] res[t].append(o) return res def get_not_included(foo, bar): """Compare objects from foo with objects defined in the values of bar (set of partitions). Returns a list of all objects included in list, but not dict values. """ res = [] for o in foo: if not compat.object_in_list(type(o), bar): res.append(o) elif not compat.object_in_list(o, bar[type(o)]): res.append(o) return res # Create partitions of both lists. This will reduce the time required for # the comparison left_objects = partition(left) right_objects = partition(right) # and then do the diff res['+'] = get_not_included(right, left_objects) res['-'] = get_not_included(left, right_objects) return res
Get the difference of both lists. The result will be a dict with this form {'+': [], '-': []}. Items listed in '+' exist only in the right list, items listed in '-' exist only in the left list.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L67-L107
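A sketch of muppy.get_diff in use, assuming pympler is importable (the full-list comparison can be slow on large snapshots):
from pympler import muppy

before = muppy.get_objects()
extra = [list(range(10)) for _ in range(100)]   # allocate something new
after = muppy.get_objects()
diff = muppy.get_diff(before, after)
print(len(diff['+']), len(diff['-']))           # only-in-after vs. only-in-before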
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
filter
def filter(objects, Type=None, min=-1, max=-1): #PYCHOK muppy filter
    """Filter objects.

    The filter can be by type, minimum size, and/or maximum size.

    Keyword arguments:
    Type -- object type to filter by
    min -- minimum object size
    max -- maximum object size

    """
    res = []
    if min > max:
        raise ValueError("minimum must be smaller than maximum")
    if Type is not None:
        res = [o for o in objects if isinstance(o, Type)]
    if min > -1:
        # note: these comparisons keep objects below `min` / above `max`,
        # which reads inverted relative to the docstring above
        res = [o for o in res if _getsizeof(o) < min]
    if max > -1:
        res = [o for o in res if _getsizeof(o) > max]
    return res
python
def filter(objects, Type=None, min=-1, max=-1): #PYCHOK muppy filter
    """Filter objects.

    The filter can be by type, minimum size, and/or maximum size.

    Keyword arguments:
    Type -- object type to filter by
    min -- minimum object size
    max -- maximum object size

    """
    res = []
    if min > max:
        raise ValueError("minimum must be smaller than maximum")
    if Type is not None:
        res = [o for o in objects if isinstance(o, Type)]
    if min > -1:
        # note: these comparisons keep objects below `min` / above `max`,
        # which reads inverted relative to the docstring above
        res = [o for o in res if _getsizeof(o) < min]
    if max > -1:
        res = [o for o in res if _getsizeof(o) > max]
    return res
Filter objects. The filter can be by type, minimum size, and/or maximum size. Keyword arguments: Type -- object type to filter by min -- minimum object size max -- maximum object size
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L114-L134
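A usage sketch combining muppy.filter and muppy.get_size, assuming pympler is importable:
from pympler import muppy

objs = muppy.get_objects()
only_strings = muppy.filter(objs, Type=str)     # keep only str instances
print(len(only_strings), muppy.get_size(only_strings))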
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
get_referents
def get_referents(object, level=1): """Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents considered. This function is recursive. """ res = gc.get_referents(object) level -= 1 if level > 0: for o in res: res.extend(get_referents(o, level)) res = _remove_duplicates(res) return res
python
def get_referents(object, level=1): """Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents considered. This function is recursive. """ res = gc.get_referents(object) level -= 1 if level > 0: for o in res: res.extend(get_referents(o, level)) res = _remove_duplicates(res) return res
Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents are considered. This function is recursive.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L136-L154
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
_get_usage
def _get_usage(function, *args): """Test if more memory is used after the function has been called. The function will be invoked twice and only the second measurement will be considered. Thus, memory used in initialisation (e.g. loading modules) will not be included in the result. The goal is to identify memory leaks caused by functions which use more and more memory. Any arguments next to the function will be passed on to the function on invocation. Note that this function is currently experimental, because it is not tested thoroughly and performs poorly. """ # The usage of a function is calculated by creating one summary of all # objects before the function is invoked and afterwards. These summaries # are compared and the diff is returned. # This function works in a 2-steps process. Before the actual function is # invoked an empty dummy function is measurement to identify the overhead # involved in the measuring process. This overhead then is subtracted from # the measurement performed on the passed function. The result reflects the # actual usage of a function call. # Also, a measurement is performed twice, allowing the adjustment to # initializing things, e.g. modules res = None def _get_summaries(function, *args): """Get a 2-tuple containing one summary from before, and one summary from after the function has been invoked. """ s_before = summary.summarize(get_objects()) function(*args) s_after = summary.summarize(get_objects()) return (s_before, s_after) def _get_usage(function, *args): """Get the usage of a function call. This function is to be used only internally. The 'real' get_usage function is a wrapper around _get_usage, but the workload is done here. """ res = [] # init before calling (s_before, s_after) = _get_summaries(function, *args) # ignore all objects used for the measurement ignore = [] if s_before != s_after: ignore.append(s_before) for row in s_before: # ignore refs from summary and frame (loop) if len(gc.get_referrers(row)) == 2: ignore.append(row) for item in row: # ignore refs from summary and frame (loop) if len(gc.get_referrers(item)) == 2: ignore.append(item) for o in ignore: s_after = summary._subtract(s_after, o) res = summary.get_diff(s_before, s_after) return summary._sweep(res) # calibrate; twice for initialization def noop(): pass offset = _get_usage(noop) offset = _get_usage(noop) # perform operation twice to handle objects possibly used in # initialisation tmp = _get_usage(function, *args) tmp = _get_usage(function, *args) tmp = summary.get_diff(offset, tmp) tmp = summary._sweep(tmp) if len(tmp) != 0: res = tmp return res
python
def _get_usage(function, *args): """Test if more memory is used after the function has been called. The function will be invoked twice and only the second measurement will be considered. Thus, memory used in initialisation (e.g. loading modules) will not be included in the result. The goal is to identify memory leaks caused by functions which use more and more memory. Any arguments next to the function will be passed on to the function on invocation. Note that this function is currently experimental, because it is not tested thoroughly and performs poorly. """ # The usage of a function is calculated by creating one summary of all # objects before the function is invoked and afterwards. These summaries # are compared and the diff is returned. # This function works in a 2-steps process. Before the actual function is # invoked an empty dummy function is measurement to identify the overhead # involved in the measuring process. This overhead then is subtracted from # the measurement performed on the passed function. The result reflects the # actual usage of a function call. # Also, a measurement is performed twice, allowing the adjustment to # initializing things, e.g. modules res = None def _get_summaries(function, *args): """Get a 2-tuple containing one summary from before, and one summary from after the function has been invoked. """ s_before = summary.summarize(get_objects()) function(*args) s_after = summary.summarize(get_objects()) return (s_before, s_after) def _get_usage(function, *args): """Get the usage of a function call. This function is to be used only internally. The 'real' get_usage function is a wrapper around _get_usage, but the workload is done here. """ res = [] # init before calling (s_before, s_after) = _get_summaries(function, *args) # ignore all objects used for the measurement ignore = [] if s_before != s_after: ignore.append(s_before) for row in s_before: # ignore refs from summary and frame (loop) if len(gc.get_referrers(row)) == 2: ignore.append(row) for item in row: # ignore refs from summary and frame (loop) if len(gc.get_referrers(item)) == 2: ignore.append(item) for o in ignore: s_after = summary._subtract(s_after, o) res = summary.get_diff(s_before, s_after) return summary._sweep(res) # calibrate; twice for initialization def noop(): pass offset = _get_usage(noop) offset = _get_usage(noop) # perform operation twice to handle objects possibly used in # initialisation tmp = _get_usage(function, *args) tmp = _get_usage(function, *args) tmp = summary.get_diff(offset, tmp) tmp = summary._sweep(tmp) if len(tmp) != 0: res = tmp return res
Test if more memory is used after the function has been called. The function will be invoked twice and only the second measurement will be considered. Thus, memory used in initialisation (e.g. loading modules) will not be included in the result. The goal is to identify memory leaks caused by functions which use more and more memory. Any arguments next to the function will be passed on to the function on invocation. Note that this function is currently experimental, because it is not tested thoroughly and performs poorly.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L156-L233
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
_remove_duplicates
def _remove_duplicates(objects):
    """Remove duplicate objects.

    Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark

    """
    seen = {}
    result = []
    for item in objects:
        marker = id(item)
        if marker in seen:
            continue
        seen[marker] = 1
        result.append(item)
    return result
python
def _remove_duplicates(objects):
    """Remove duplicate objects.

    Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark

    """
    seen = {}
    result = []
    for item in objects:
        marker = id(item)
        if marker in seen:
            continue
        seen[marker] = 1
        result.append(item)
    return result
Remove duplicate objects. Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L242-L256
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/action_sorter.py
ActionSorter.get_optionals_without_choices
def get_optionals_without_choices(self, actions): """ All actions which are: (a) Optional, but without required choices (b) Not of a "boolean" type (storeTrue, etc..) (c) Of type _AppendAction e.g. anything which has an argument style like: >>> -f myfilename.txt """ boolean_actions = ( _StoreConstAction, _StoreFalseAction, _StoreTrueAction ) return [action for action in actions if action.option_strings and not action.choices and not isinstance(action, _CountAction) and not isinstance(action, _HelpAction) and type(action) not in boolean_actions]
python
def get_optionals_without_choices(self, actions): """ All actions which are: (a) Optional, but without required choices (b) Not of a "boolean" type (storeTrue, etc..) (c) Of type _AppendAction e.g. anything which has an argument style like: >>> -f myfilename.txt """ boolean_actions = ( _StoreConstAction, _StoreFalseAction, _StoreTrueAction ) return [action for action in actions if action.option_strings and not action.choices and not isinstance(action, _CountAction) and not isinstance(action, _HelpAction) and type(action) not in boolean_actions]
All actions which are: (a) Optional, but without required choices (b) Not of a "boolean" type (storeTrue, etc..) (c) Of type _AppendAction e.g. anything which has an argument style like: >>> -f myfilename.txt
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/action_sorter.py#L91-L111
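A standalone sketch (not Gooey's own API) showing which argparse actions the selection criteria above would keep; it relies on argparse's private action classes, so treat it as illustrative only:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('filename')                         # positional -> excluded
parser.add_argument('-f', '--file')                     # plain optional -> kept
parser.add_argument('--mode', choices=['a', 'b'])       # has choices -> excluded
parser.add_argument('--verbose', action='store_true')   # boolean flag -> excluded

boolean_actions = (argparse._StoreConstAction, argparse._StoreFalseAction,
                   argparse._StoreTrueAction)
kept = [a for a in parser._actions
        if a.option_strings and not a.choices
        and not isinstance(a, (argparse._CountAction, argparse._HelpAction))
        and type(a) not in boolean_actions]
print([a.option_strings for a in kept])   # [['-f', '--file']]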
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/action_sorter.py
ActionSorter.get_flag_style_optionals
def get_flag_style_optionals(self, actions): """ Gets all instances of "flag" type options. i.e. options which either store a const, or store boolean style options (e.g. StoreTrue). Types: _StoreTrueAction _StoreFalseAction _StoreConst """ return [action for action in actions if isinstance(action, _StoreTrueAction) or isinstance(action, _StoreFalseAction) or isinstance(action, _StoreConstAction)]
python
def get_flag_style_optionals(self, actions): """ Gets all instances of "flag" type options. i.e. options which either store a const, or store boolean style options (e.g. StoreTrue). Types: _StoreTrueAction _StoreFalseAction _StoreConst """ return [action for action in actions if isinstance(action, _StoreTrueAction) or isinstance(action, _StoreFalseAction) or isinstance(action, _StoreConstAction)]
Gets all instances of "flag" type options. i.e. options which either store a const, or store boolean style options (e.g. StoreTrue). Types: _StoreTrueAction _StoreFalseAction _StoreConst
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/action_sorter.py#L122-L136
lrq3000/pyFileFixity
pyFileFixity/lib/gooey/gui/imageutil.py
resize_bitmap
def resize_bitmap(parent, _bitmap, target_height):
    '''
    Resizes a bitmap to a height of 89 pixels (the size of the top panel),
    while keeping aspect ratio intact
    '''
    image = wx.ImageFromBitmap(_bitmap)
    _width, _height = image.GetSize()
    if _height < target_height:
        return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image))
    ratio = float(_width) / _height
    image = image.Scale(target_height * ratio, target_height, wx.IMAGE_QUALITY_HIGH)
    return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image))
python
def resize_bitmap(parent, _bitmap, target_height):
    '''
    Resizes a bitmap to a height of 89 pixels (the size of the top panel),
    while keeping aspect ratio intact
    '''
    image = wx.ImageFromBitmap(_bitmap)
    _width, _height = image.GetSize()
    if _height < target_height:
        return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image))
    ratio = float(_width) / _height
    image = image.Scale(target_height * ratio, target_height, wx.IMAGE_QUALITY_HIGH)
    return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image))
Resizes a bitmap to a height of 89 pixels (the size of the top panel), while keeping aspect ratio intact
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/imageutil.py#L16-L28
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_iterators.py
ilevenshtein
def ilevenshtein(seq1, seqs, max_dist=-1): """Compute the Levenshtein distance between the sequence `seq1` and the series of sequences `seqs`. `seq1`: the reference sequence `seqs`: a series of sequences (can be a generator) `max_dist`: if provided and > 0, only the sequences which distance from the reference sequence is lower or equal to this value will be returned. The return value is a series of pairs (distance, sequence). The sequence objects in `seqs` are expected to be of the same kind than the reference sequence in the C implementation; the same holds true for `ifast_comp`. """ for seq2 in seqs: dist = levenshtein(seq1, seq2, max_dist=max_dist) if dist != -1: yield dist, seq2
python
def ilevenshtein(seq1, seqs, max_dist=-1): """Compute the Levenshtein distance between the sequence `seq1` and the series of sequences `seqs`. `seq1`: the reference sequence `seqs`: a series of sequences (can be a generator) `max_dist`: if provided and > 0, only the sequences which distance from the reference sequence is lower or equal to this value will be returned. The return value is a series of pairs (distance, sequence). The sequence objects in `seqs` are expected to be of the same kind than the reference sequence in the C implementation; the same holds true for `ifast_comp`. """ for seq2 in seqs: dist = levenshtein(seq1, seq2, max_dist=max_dist) if dist != -1: yield dist, seq2
Compute the Levenshtein distance between the sequence `seq1` and the series of sequences `seqs`. `seq1`: the reference sequence `seqs`: a series of sequences (can be a generator) `max_dist`: if provided and > 0, only the sequences whose distance from the reference sequence is lower than or equal to this value will be returned. The return value is a series of pairs (distance, sequence). The sequence objects in `seqs` are expected to be of the same kind as the reference sequence in the C implementation; the same holds true for `ifast_comp`.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_iterators.py#L3-L21
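A usage sketch, assuming the distance package bundled with this project is importable and exports ilevenshtein:
from distance import ilevenshtein

words = ["fo", "bar", "foob", "foo", "foobaz"]
print(sorted(ilevenshtein("foo", words, max_dist=2)))
# expected: [(0, 'foo'), (1, 'fo'), (1, 'foob')]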
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_iterators.py
ifast_comp
def ifast_comp(seq1, seqs, transpositions=False): """Return an iterator over all the sequences in `seqs` which distance from `seq1` is lower or equal to 2. The sequences which distance from the reference sequence is higher than that are dropped. `seq1`: the reference sequence. `seqs`: a series of sequences (can be a generator) `transpositions` has the same sense than in `fast_comp`. The return value is a series of pairs (distance, sequence). You might want to call `sorted()` on the iterator to get the results in a significant order: >>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"]) >>> sorted(g) [(0, 'foo'), (1, 'fo'), (1, 'foob')] """ for seq2 in seqs: dist = fast_comp(seq1, seq2, transpositions) if dist != -1: yield dist, seq2
python
def ifast_comp(seq1, seqs, transpositions=False): """Return an iterator over all the sequences in `seqs` which distance from `seq1` is lower or equal to 2. The sequences which distance from the reference sequence is higher than that are dropped. `seq1`: the reference sequence. `seqs`: a series of sequences (can be a generator) `transpositions` has the same sense than in `fast_comp`. The return value is a series of pairs (distance, sequence). You might want to call `sorted()` on the iterator to get the results in a significant order: >>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"]) >>> sorted(g) [(0, 'foo'), (1, 'fo'), (1, 'foob')] """ for seq2 in seqs: dist = fast_comp(seq1, seq2, transpositions) if dist != -1: yield dist, seq2
Return an iterator over all the sequences in `seqs` whose distance from `seq1` is lower than or equal to 2. The sequences whose distance from the reference sequence is higher than that are dropped. `seq1`: the reference sequence. `seqs`: a series of sequences (can be a generator) `transpositions` has the same meaning as in `fast_comp`. The return value is a series of pairs (distance, sequence). You might want to call `sorted()` on the iterator to get the results in a meaningful order: >>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"]) >>> sorted(g) [(0, 'foo'), (1, 'fo'), (1, 'foob')]
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_iterators.py#L24-L45
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
summarize
def summarize(objects): """Summarize an objects list. Return a list of lists, whereas each row consists of:: [str(type), number of objects of this type, total size of these objects]. No guarantee regarding the order is given. """ count = {} total_size = {} for o in objects: otype = _repr(o) if otype in count: count[otype] += 1 total_size[otype] += _getsizeof(o) else: count[otype] = 1 total_size[otype] = _getsizeof(o) rows = [] for otype in count: rows.append([otype, count[otype], total_size[otype]]) return rows
python
def summarize(objects): """Summarize an objects list. Return a list of lists, whereas each row consists of:: [str(type), number of objects of this type, total size of these objects]. No guarantee regarding the order is given. """ count = {} total_size = {} for o in objects: otype = _repr(o) if otype in count: count[otype] += 1 total_size[otype] += _getsizeof(o) else: count[otype] = 1 total_size[otype] = _getsizeof(o) rows = [] for otype in count: rows.append([otype, count[otype], total_size[otype]]) return rows
Summarize an object list. Return a list of lists, where each row consists of:: [str(type), number of objects of this type, total size of these objects]. No guarantee regarding the order is given.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L112-L134
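A usage sketch for summarize together with print_, assuming pympler is importable:
from pympler import muppy, summary

sum1 = summary.summarize(muppy.get_objects())
summary.print_(sum1, limit=5)    # top 5 types by total size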
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
get_diff
def get_diff(left, right): """Get the difference of two summaries. Subtracts the values of the right summary from the values of the left summary. If similar rows appear on both sides, the are included in the summary with 0 for number of elements and total size. If the number of elements of a row of the diff is 0, but the total size is not, it means that objects likely have changed, but not there number, thus resulting in a changed size. """ res = [] for row_r in right: found = False for row_l in left: if row_r[0] == row_l[0]: res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]]) found = True if not found: res.append(row_r) for row_l in left: found = False for row_r in right: if row_l[0] == row_r[0]: found = True if not found: res.append([row_l[0], -row_l[1], -row_l[2]]) return res
python
def get_diff(left, right): """Get the difference of two summaries. Subtracts the values of the right summary from the values of the left summary. If similar rows appear on both sides, the are included in the summary with 0 for number of elements and total size. If the number of elements of a row of the diff is 0, but the total size is not, it means that objects likely have changed, but not there number, thus resulting in a changed size. """ res = [] for row_r in right: found = False for row_l in left: if row_r[0] == row_l[0]: res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]]) found = True if not found: res.append(row_r) for row_l in left: found = False for row_r in right: if row_l[0] == row_r[0]: found = True if not found: res.append([row_l[0], -row_l[1], -row_l[2]]) return res
Get the difference of two summaries. Subtracts the values of the right summary from the values of the left summary. If similar rows appear on both sides, they are included in the summary with 0 for number of elements and total size. If the number of elements of a row of the diff is 0, but the total size is not, it means that objects likely have changed, but not their number, thus resulting in a changed size.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L136-L165
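A before/after sketch for the summary diff, assuming pympler is importable:
from pympler import muppy, summary

sum1 = summary.summarize(muppy.get_objects())
data = [dict(x=i) for i in range(1000)]        # allocate something measurable
sum2 = summary.summarize(muppy.get_objects())
summary.print_(summary.get_diff(sum1, sum2))   # growth between the two snapshots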
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
print_
def print_(rows, limit=15, sort='size', order='descending'): """Print the rows as a summary. Keyword arguments: limit -- the maximum number of elements to be listed sort -- sort elements by 'size', 'type', or '#' order -- sort 'ascending' or 'descending' """ localrows = [] for row in rows: localrows.append(list(row)) # input validation sortby = ['type', '#', 'size'] if sort not in sortby: raise ValueError("invalid sort, should be one of" + str(sortby)) orders = ['ascending', 'descending'] if order not in orders: raise ValueError("invalid order, should be one of" + str(orders)) # sort rows if sortby.index(sort) == 0: if order == "ascending": localrows.sort(key=lambda x: _repr(x[0])) elif order == "descending": localrows.sort(key=lambda x: _repr(x[0]), reverse=True) else: if order == "ascending": localrows.sort(key=lambda x: x[sortby.index(sort)]) elif order == "descending": localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True) # limit rows localrows = localrows[0:limit] for row in localrows: row[2] = stringutils.pp(row[2]) # print rows localrows.insert(0,["types", "# objects", "total size"]) _print_table(localrows)
python
def print_(rows, limit=15, sort='size', order='descending'): """Print the rows as a summary. Keyword arguments: limit -- the maximum number of elements to be listed sort -- sort elements by 'size', 'type', or '#' order -- sort 'ascending' or 'descending' """ localrows = [] for row in rows: localrows.append(list(row)) # input validation sortby = ['type', '#', 'size'] if sort not in sortby: raise ValueError("invalid sort, should be one of" + str(sortby)) orders = ['ascending', 'descending'] if order not in orders: raise ValueError("invalid order, should be one of" + str(orders)) # sort rows if sortby.index(sort) == 0: if order == "ascending": localrows.sort(key=lambda x: _repr(x[0])) elif order == "descending": localrows.sort(key=lambda x: _repr(x[0]), reverse=True) else: if order == "ascending": localrows.sort(key=lambda x: x[sortby.index(sort)]) elif order == "descending": localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True) # limit rows localrows = localrows[0:limit] for row in localrows: row[2] = stringutils.pp(row[2]) # print rows localrows.insert(0,["types", "# objects", "total size"]) _print_table(localrows)
Print the rows as a summary. Keyword arguments: limit -- the maximum number of elements to be listed sort -- sort elements by 'size', 'type', or '#' order -- sort 'ascending' or 'descending'
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L167-L202
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
_print_table
def _print_table(rows, header=True): """Print a list of lists as a pretty table. Keyword arguments: header -- if True the first row is treated as a table header inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662 """ border = "=" # vertical delimiter vdelim = " | " # padding nr. of spaces are left around the longest element in the # column padding = 1 # may be left,center,right justify = 'right' justify = {'left' : str.ljust, 'center' : str.center, 'right' : str.rjust}[justify.lower()] # calculate column widths (longest item in each col # plus "padding" nr of spaces on both sides) cols = zip(*rows) colWidths = [max([len(str(item))+2*padding for item in col]) for col in cols] borderline = vdelim.join([w*border for w in colWidths]) for row in rows: print(vdelim.join([justify(str(item),width) for (item,width) in zip(row,colWidths)])) if header: print(borderline) header=False
python
def _print_table(rows, header=True): """Print a list of lists as a pretty table. Keyword arguments: header -- if True the first row is treated as a table header inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662 """ border = "=" # vertical delimiter vdelim = " | " # padding nr. of spaces are left around the longest element in the # column padding = 1 # may be left,center,right justify = 'right' justify = {'left' : str.ljust, 'center' : str.center, 'right' : str.rjust}[justify.lower()] # calculate column widths (longest item in each col # plus "padding" nr of spaces on both sides) cols = zip(*rows) colWidths = [max([len(str(item))+2*padding for item in col]) for col in cols] borderline = vdelim.join([w*border for w in colWidths]) for row in rows: print(vdelim.join([justify(str(item),width) for (item,width) in zip(row,colWidths)])) if header: print(borderline) header=False
Print a list of lists as a pretty table. Keyword arguments: header -- if True the first row is treated as a table header inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L204-L232
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
_repr
def _repr(o, verbosity=1): """Get meaning object representation. This function should be used when the simple str(o) output would result in too general data. E.g. "<type 'instance'" is less meaningful than "instance: Foo". Keyword arguments: verbosity -- if True the first row is treated as a table header """ res = "" t = type(o) if (verbosity == 0) or (t not in representations): res = str(t) else: verbosity -= 1 if len(representations[t]) < verbosity: verbosity = len(representations[t]) - 1 res = representations[t][verbosity](o) res = address.sub('', res) res = type_prefix.sub('', res) res = type_suffix.sub('', res) return res
python
def _repr(o, verbosity=1): """Get meaning object representation. This function should be used when the simple str(o) output would result in too general data. E.g. "<type 'instance'" is less meaningful than "instance: Foo". Keyword arguments: verbosity -- if True the first row is treated as a table header """ res = "" t = type(o) if (verbosity == 0) or (t not in representations): res = str(t) else: verbosity -= 1 if len(representations[t]) < verbosity: verbosity = len(representations[t]) - 1 res = representations[t][verbosity](o) res = address.sub('', res) res = type_prefix.sub('', res) res = type_suffix.sub('', res) return res
Get a meaningful object representation. This function should be used when the simple str(o) output would result in too general data. E.g. "<type 'instance'>" is less meaningful than "instance: Foo". Keyword arguments: verbosity -- level of detail of the representation; 0 falls back to the plain type name
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L240-L266
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
_traverse
def _traverse(summary, function, *args): """Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row """ function(summary, *args) for row in summary: function(row, *args) for item in row: function(item, *args)
python
def _traverse(summary, function, *args): """Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row """ function(summary, *args) for row in summary: function(row, *args) for item in row: function(item, *args)
Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L268-L281
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
_subtract
def _subtract(summary, o):
    """Remove object o from the summary by subtracting its size."""
    found = False
    row = [_repr(o), 1, _getsizeof(o)]
    for r in summary:
        if r[0] == row[0]:
            (r[1], r[2]) = (r[1] - row[1], r[2] - row[2])
            found = True
    if not found:
        summary.append([row[0], -row[1], -row[2]])
    return summary
python
def _subtract(summary, o):
    """Remove object o from the summary by subtracting its size."""
    found = False
    row = [_repr(o), 1, _getsizeof(o)]
    for r in summary:
        if r[0] == row[0]:
            (r[1], r[2]) = (r[1] - row[1], r[2] - row[2])
            found = True
    if not found:
        summary.append([row[0], -row[1], -row[2]])
    return summary
Remove object o from the summary by subtracting its size.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L283-L293
lrq3000/pyFileFixity
pyFileFixity/rfigc.py
check_structure
def check_structure(filepath):
    """Returns False if the file is okay, None if file format is unsupported by PIL/PILLOW, or returns an error string if the file is corrupt."""
    #http://stackoverflow.com/questions/1401527/how-do-i-programmatically-check-whether-an-image-png-jpeg-or-gif-is-corrupted/1401565#1401565
    # Check structure only for images (not supported for other types currently)
    if filepath.lower().endswith(tuple(img_filter)):
        try:
            #try:
            im = PIL.Image.open(filepath)
            #except IOError: # File format not supported by PIL, we skip the check_structure - ARG this is also raised if a supported image file is corrupted...
                #print("File: %s: DETECTNOPE" % filepath)
                #return None
            im.verify()
        # If an error occurred, the structure is corrupted
        except Exception as e:
            return str(e)
        # Else no exception, there's no corruption
        return False
    # Else the format does not currently support structure checking, we just return None to signal we didn't check
    else:
        return None
python
def check_structure(filepath):
    """Returns False if the file is okay, None if file format is unsupported by PIL/PILLOW, or returns an error string if the file is corrupt."""
    #http://stackoverflow.com/questions/1401527/how-do-i-programmatically-check-whether-an-image-png-jpeg-or-gif-is-corrupted/1401565#1401565
    # Check structure only for images (not supported for other types currently)
    if filepath.lower().endswith(tuple(img_filter)):
        try:
            #try:
            im = PIL.Image.open(filepath)
            #except IOError: # File format not supported by PIL, we skip the check_structure - ARG this is also raised if a supported image file is corrupted...
                #print("File: %s: DETECTNOPE" % filepath)
                #return None
            im.verify()
        # If an error occurred, the structure is corrupted
        except Exception as e:
            return str(e)
        # Else no exception, there's no corruption
        return False
    # Else the format does not currently support structure checking, we just return None to signal we didn't check
    else:
        return None
Returns False if the file is okay, None if file format is unsupported by PIL/PILLOW, or returns an error string if the file is corrupt.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/rfigc.py#L76-L96
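A standalone sketch of the same Pillow verification idea ('photo.jpg' is a placeholder path):
import PIL.Image

def image_is_intact(path):
    try:
        with PIL.Image.open(path) as im:
            im.verify()          # raises on a corrupted structure
        return True
    except Exception:
        return False

print(image_is_intact('photo.jpg'))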
lrq3000/pyFileFixity
pyFileFixity/rfigc.py
generate_hashes
def generate_hashes(filepath, blocksize=65536):
    '''Generate several hashes (md5 and sha1) in a single sweep of the file. Using two hashes lowers the probability of collision and false negative (file modified but the hash is the same). Supports big files by streaming block by block to the hasher automatically. Blocksize can be any multiple of 128.'''
    # Init hashers
    hasher_md5 = hashlib.md5()
    hasher_sha1 = hashlib.sha1()
    # Read the file block by block
    with open(filepath, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            # Compute both hashes at the same time
            hasher_md5.update(buf)
            hasher_sha1.update(buf)
            # Load the next data block from file
            buf = afile.read(blocksize)
    return (hasher_md5.hexdigest(), hasher_sha1.hexdigest())
python
def generate_hashes(filepath, blocksize=65536):
    '''Generate several hashes (md5 and sha1) in a single sweep of the file. Using two hashes lowers the probability of collision and false negative (file modified but the hash is the same). Supports big files by streaming block by block to the hasher automatically. Blocksize can be any multiple of 128.'''
    # Init hashers
    hasher_md5 = hashlib.md5()
    hasher_sha1 = hashlib.sha1()
    # Read the file block by block
    with open(filepath, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            # Compute both hashes at the same time
            hasher_md5.update(buf)
            hasher_sha1.update(buf)
            # Load the next data block from file
            buf = afile.read(blocksize)
    return (hasher_md5.hexdigest(), hasher_sha1.hexdigest())
Generate several hashes (md5 and sha1) in a single sweep of the file. Using two hashes lowers the probability of collision and false negative (file modified but the hash is the same). Supports big files by streaming block by block to the hasher automatically. Blocksize can be any multiple of 128.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/rfigc.py#L98-L112
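A standalone equivalent of the single-pass double hashing, runnable on any file path:
import hashlib

def md5_sha1(path, blocksize=65536):
    md5, sha1 = hashlib.md5(), hashlib.sha1()
    with open(path, 'rb') as f:
        for buf in iter(lambda: f.read(blocksize), b''):
            md5.update(buf)      # feed both hashers from the same read
            sha1.update(buf)
    return md5.hexdigest(), sha1.hexdigest()

print(md5_sha1(__file__))        # hash this very script as a quick check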
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.get_degree
def get_degree(self, poly=None):
    '''Returns the degree of the polynomial'''
    if not poly:
        return self.degree #return len(self.coefficients) - 1
    elif poly and hasattr(poly, "coefficients"):  # note: hasattr takes the object first, then the attribute name
        return len(poly.coefficients) - 1
    else:
        while poly and poly[-1] == 0:
            poly.pop()   # normalize
        return len(poly)-1
python
def get_degree(self, poly=None):
    '''Returns the degree of the polynomial'''
    if not poly:
        return self.degree #return len(self.coefficients) - 1
    elif poly and hasattr(poly, "coefficients"):  # note: hasattr takes the object first, then the attribute name
        return len(poly.coefficients) - 1
    else:
        while poly and poly[-1] == 0:
            poly.pop()   # normalize
        return len(poly)-1
Returns the degree of the polynomial
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L87-L97
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.mul_at
def mul_at(self, other, k):
    '''Compute the multiplication between two polynomials only at the specified coefficient
    (this is a lot cheaper than doing the full polynomial multiplication and then extract
    only the required coefficient)'''
    if k > (self.degree + other.degree) or k > self.degree:
        return 0  # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0
    term = 0
    for i in _range(min(len(self), len(other))):
        coef1 = self.coefficients[-(k-i+1)]
        coef2 = other.coefficients[-(i+1)]
        if coef1 == 0 or coef2 == 0: continue  # log(0) is undefined, skip (and in addition it's a nice optimization)
        term += coef1 * coef2
    return term
python
def mul_at(self, other, k):
    '''Compute the multiplication between two polynomials only at the specified coefficient
    (this is a lot cheaper than doing the full polynomial multiplication and then extract
    only the required coefficient)'''
    if k > (self.degree + other.degree) or k > self.degree:
        return 0  # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0
    term = 0
    for i in _range(min(len(self), len(other))):
        coef1 = self.coefficients[-(k-i+1)]
        coef2 = other.coefficients[-(i+1)]
        if coef1 == 0 or coef2 == 0: continue  # log(0) is undefined, skip (and in addition it's a nice optimization)
        term += coef1 * coef2
    return term
Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L132-L143
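A standalone sketch of the underlying idea (not the GF2int class itself): the coefficient of x^k in a product is a single convolution sum, so the full product never needs to be built; coefficients are listed lowest degree first here for simplicity:
a = [3, 2, 1]        # 3 + 2x + x^2
b = [5, 4]           # 5 + 4x

def coeff_of_product(p, q, k):
    return sum(p[i] * q[k - i]
               for i in range(len(p))
               if 0 <= k - i < len(q))

full = [coeff_of_product(a, b, k) for k in range(len(a) + len(b) - 1)]
print(full)          # [15, 22, 13, 4]  i.e. 15 + 22x + 13x^2 + 4x^3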
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.scale
def scale(self, scalar):
    '''Multiply a polynomial with a scalar'''
    return self.__class__([self.coefficients[i] * scalar for i in _range(len(self))])
python
def scale(self, scalar):
    '''Multiply a polynomial with a scalar'''
    return self.__class__([self.coefficients[i] * scalar for i in _range(len(self))])
Multiply a polynomial with a scalar
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L145-L147
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial._fastdivmod
def _fastdivmod(dividend, divisor): '''Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials. A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.''' # Note: for RS encoding, you should supply divisor = mprime (not m, you need the padded message) msg_out = list(dividend) # Copy the dividend normalizer = divisor[0] # precomputing for performance for i in _range(len(dividend)-(len(divisor)-1)): msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. For more infos, see http://en.wikipedia.org/wiki/Synthetic_division coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient if divisor[j] != 0: # log(0) is undefined so we need to avoid that case msg_out[i + j] += -divisor[j] * coef # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
python
def _fastdivmod(dividend, divisor): '''Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials. A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.''' # Note: for RS encoding, you should supply divisor = mprime (not m, you need the padded message) msg_out = list(dividend) # Copy the dividend normalizer = divisor[0] # precomputing for performance for i in _range(len(dividend)-(len(divisor)-1)): msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. For more infos, see http://en.wikipedia.org/wiki/Synthetic_division coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient if divisor[j] != 0: # log(0) is undefined so we need to avoid that case msg_out[i + j] += -divisor[j] * coef # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials. A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L162-L178
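A standalone sketch of extended synthetic division over ordinary numbers (highest-degree coefficient first, mirroring the layout above):
def synth_divmod(dividend, divisor):
    out = list(dividend)
    normalizer = divisor[0]
    for i in range(len(dividend) - (len(divisor) - 1)):
        out[i] /= normalizer
        coef = out[i]
        if coef != 0:
            for j in range(1, len(divisor)):
                out[i + j] += -divisor[j] * coef
    sep = -(len(divisor) - 1)
    return out[:sep], out[sep:]      # (quotient, remainder)

# (x^3 - 12x^2 - 42) / (x - 3) -> quotient x^2 - 9x - 27, remainder -123
print(synth_divmod([1, -12, 0, -42], [1, -3]))   # ([1.0, -9.0, -27.0], [-123.0])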
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial._gffastdivmod
def _gffastdivmod(dividend, divisor): '''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int). Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)''' msg_out = list(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed for i in _range(len(dividend)-(len(divisor)-1)): coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic) #if divisor[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0 msg_out[i + j] ^= divisor[j] * coef # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef # Note: we could speed things up a bit if we could inline the table lookups, but the Polynomial class is generic, it doesn't know anything about the underlying fields and their operators. Good OOP design, bad for performances in Python because of function calls and the optimizations we can't do (such as precomputing gf_exp[divisor]). That's what is done in reedsolo lib, this is one of the reasons it is faster. # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
python
def _gffastdivmod(dividend, divisor): '''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int). Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)''' msg_out = list(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed for i in _range(len(dividend)-(len(divisor)-1)): coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic) #if divisor[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0 msg_out[i + j] ^= divisor[j] * coef # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef # Note: we could speed things up a bit if we could inline the table lookups, but the Polynomial class is generic, it doesn't know anything about the underlying fields and their operators. Good OOP design, bad for performances in Python because of function calls and the optimizations we can't do (such as precomputing gf_exp[divisor]). That's what is done in reedsolo lib, this is one of the reasons it is faster. # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int). Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L180-L196
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.evaluate
def evaluate(self, x): '''Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).''' # Holds the sum over each term in the polynomial #c = 0 # Holds the current power of x. This is multiplied by x after each term # in the polynomial is added up. Initialized to x^0 = 1 #p = 1 #for term in self.coefficients[::-1]: # c = c + term * p # p = p * x #return c # Faster alternative using Horner's Scheme y = self[0] for i in _range(1, len(self)): y = y * x + self.coefficients[i] return y
python
def evaluate(self, x): '''Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).''' # Holds the sum over each term in the polynomial #c = 0 # Holds the current power of x. This is multiplied by x after each term # in the polynomial is added up. Initialized to x^0 = 1 #p = 1 #for term in self.coefficients[::-1]: # c = c + term * p # p = p * x #return c # Faster alternative using Horner's Scheme y = self[0] for i in _range(1, len(self)): y = y * x + self.coefficients[i] return y
Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L331-L349
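Horner's scheme as used above, shown over plain integers for clarity; the library applies the same recurrence with GF2int coefficients, and the helper name below is illustrative only.

def horner(coefficients, x):
    # coefficients are ordered from highest to lowest degree, as in the Polynomial class
    y = coefficients[0]
    for c in coefficients[1:]:
        y = y * x + c
    return y

print(horner([2, 3, 5], 4))  # 2*4**2 + 3*4 + 5 = 49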
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.evaluate_array
def evaluate_array(self, x): '''Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum''' x_gf = self.coefficients[0].__class__(x) arr = [self.coefficients[-i]*x_gf**(i-1) for i in _range(len(self), 0, -1)] # if x == 1: arr = sum(self.coefficients) return arr, sum(arr)
python
def evaluate_array(self, x): '''Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum''' x_gf = self.coefficients[0].__class__(x) arr = [self.coefficients[-i]*x_gf**(i-1) for i in _range(len(self), 0, -1)] # if x == 1: arr = sum(self.coefficients) return arr, sum(arr)
Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L351-L356
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.derive
def derive(self): '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))''' #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.) #return Polynomial(res) # One liner way to do it (also a bit faster too) #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] ) # Another faster version L = len(self)-1 return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )
python
def derive(self): '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))''' #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.) #return Polynomial(res) # One liner way to do it (also a bit faster too) #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] ) # Another faster version L = len(self)-1 return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )
Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L358-L369
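The same formal-derivative formula, sketched over plain integer coefficients ordered from highest to lowest degree (as in the Polynomial class). In the library the coefficients are GF2int elements and the integer factor is handled by the field's own operators; this sketch only illustrates the index arithmetic.

def derive(coeffs):
    L = len(coeffs) - 1  # degree of the polynomial
    return [(L - i) * coeffs[i] for i in range(len(coeffs) - 1)]

print(derive([3, 2, 5, 7]))  # d/dx (3x^3 + 2x^2 + 5x + 7) -> [9, 4, 5]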
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
feature_scaling
def feature_scaling(x, xmin, xmax, a=0, b=1): '''Generalized feature scaling (useful for variable error correction rate calculation)''' return a + float(x - xmin) * (b - a) / (xmax - xmin)
python
def feature_scaling(x, xmin, xmax, a=0, b=1): '''Generalized feature scaling (useful for variable error correction rate calculation)''' return a + float(x - xmin) * (b - a) / (xmax - xmin)
Generalized feature scaling (useful for variable error correction rate calculation)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L91-L93
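A quick worked example of the scaling above, with purely illustrative numbers: a byte at the midpoint of a 100000-byte file, interpolating the resilience rate from 0.3 down to 0.1.

def feature_scaling(x, xmin, xmax, a=0, b=1):
    return a + float(x - xmin) * (b - a) / (xmax - xmin)

print(feature_scaling(50000, 0, 100000, a=0.3, b=0.1))  # -> 0.2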
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
entry_fields
def entry_fields(file, entry_pos, field_delim="\xFF"): '''From an ecc entry position (a list with starting and ending positions), extract the metadata fields (filename, filesize, ecc for both), and the starting/ending positions of the ecc stream (containing variably encoded blocks of hash and ecc per blocks of the original file's header)''' # Read the the beginning of the ecc entry blocksize = 65535 file.seek(entry_pos[0]) entry = file.read(blocksize) entry = entry.lstrip(field_delim) # if there was some slight adjustment error (example: the last ecc block of the last file was the field_delim, then we will start with a field_delim, and thus we need to remove the trailing field_delim which is useless and will make the field detection buggy). This is not really a big problem for the previous file's ecc block: the missing ecc characters (which were mistaken for a field_delim), will just be missing (so we will lose a bit of resiliency for the last block of the previous file, but that's not a huge issue, the correction can still rely on the other characters). # TODO: do in a while loop in case the filename is really big (bigger than blocksize) - or in case we add intra-ecc for filename # Find metadata fields delimiters positions # TODO: automate this part, just give in argument the number of field_delim to find, and the func will find the x field_delims (the number needs to be specified in argument because the field_delim can maybe be found wrongly inside the ecc stream, which we don't want) first = entry.find(field_delim) second = entry.find(field_delim, first+len(field_delim)) third = entry.find(field_delim, second+len(field_delim)) fourth = entry.find(field_delim, third+len(field_delim)) # Note: we do not try to find all the field delimiters because we optimize here: we just walk the string to find the exact number of field_delim we are looking for, and after we stop, no need to walk through the whole string. # Extract the content of the fields # Metadata fields relfilepath = entry[:first] filesize = entry[first+len(field_delim):second] relfilepath_ecc = entry[second+len(field_delim):third] filesize_ecc = entry[third+len(field_delim):fourth] # Ecc stream field (aka ecc blocks) ecc_field_pos = [entry_pos[0]+fourth+len(field_delim), entry_pos[1]] # return the starting and ending position of the rest of the ecc track, which contains blocks of hash/ecc of the original file's content. # Place the cursor at the beginning of the ecc_field file.seek(ecc_field_pos[0]) # Try to convert to an int, an error may happen try: filesize = int(filesize) except Exception, e: print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ") print(e) #filesize = 0 # avoid setting to 0, we keep as an int so that we can try to fix using intra-ecc # entries = [ {"message":, "ecc":, "hash":}, etc.] return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc, "filesize": filesize, "filesize_ecc": filesize_ecc, "ecc_field_pos": ecc_field_pos}
python
def entry_fields(file, entry_pos, field_delim="\xFF"): '''From an ecc entry position (a list with starting and ending positions), extract the metadata fields (filename, filesize, ecc for both), and the starting/ending positions of the ecc stream (containing variably encoded blocks of hash and ecc per blocks of the original file's header)''' # Read the the beginning of the ecc entry blocksize = 65535 file.seek(entry_pos[0]) entry = file.read(blocksize) entry = entry.lstrip(field_delim) # if there was some slight adjustment error (example: the last ecc block of the last file was the field_delim, then we will start with a field_delim, and thus we need to remove the trailing field_delim which is useless and will make the field detection buggy). This is not really a big problem for the previous file's ecc block: the missing ecc characters (which were mistaken for a field_delim), will just be missing (so we will lose a bit of resiliency for the last block of the previous file, but that's not a huge issue, the correction can still rely on the other characters). # TODO: do in a while loop in case the filename is really big (bigger than blocksize) - or in case we add intra-ecc for filename # Find metadata fields delimiters positions # TODO: automate this part, just give in argument the number of field_delim to find, and the func will find the x field_delims (the number needs to be specified in argument because the field_delim can maybe be found wrongly inside the ecc stream, which we don't want) first = entry.find(field_delim) second = entry.find(field_delim, first+len(field_delim)) third = entry.find(field_delim, second+len(field_delim)) fourth = entry.find(field_delim, third+len(field_delim)) # Note: we do not try to find all the field delimiters because we optimize here: we just walk the string to find the exact number of field_delim we are looking for, and after we stop, no need to walk through the whole string. # Extract the content of the fields # Metadata fields relfilepath = entry[:first] filesize = entry[first+len(field_delim):second] relfilepath_ecc = entry[second+len(field_delim):third] filesize_ecc = entry[third+len(field_delim):fourth] # Ecc stream field (aka ecc blocks) ecc_field_pos = [entry_pos[0]+fourth+len(field_delim), entry_pos[1]] # return the starting and ending position of the rest of the ecc track, which contains blocks of hash/ecc of the original file's content. # Place the cursor at the beginning of the ecc_field file.seek(ecc_field_pos[0]) # Try to convert to an int, an error may happen try: filesize = int(filesize) except Exception, e: print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ") print(e) #filesize = 0 # avoid setting to 0, we keep as an int so that we can try to fix using intra-ecc # entries = [ {"message":, "ecc":, "hash":}, etc.] return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc, "filesize": filesize, "filesize_ecc": filesize_ecc, "ecc_field_pos": ecc_field_pos}
From an ecc entry position (a list with starting and ending positions), extract the metadata fields (filename, filesize, ecc for both), and the starting/ending positions of the ecc stream (containing variably encoded blocks of hash and ecc per blocks of the original file's header)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L97-L135
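A toy illustration of the entry layout this parser expects: four field_delim-separated metadata fields followed by the ecc stream. All values below are hypothetical placeholders.

field_delim = "\xFF"
entry = field_delim.join(["dir/file.txt", "1024", "<path ecc>", "<size ecc>", "<hash+ecc blocks...>"])

first = entry.find(field_delim)
second = entry.find(field_delim, first + len(field_delim))
relfilepath = entry[:first]
filesize = int(entry[first + len(field_delim):second])
print(relfilepath, filesize)  # dir/file.txt 1024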
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
stream_entry_assemble
def stream_entry_assemble(hasher, file, eccfile, entry_fields, max_block_size, header_size, resilience_rates, constantmode=False): '''From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.''' # Cut the header and the ecc entry into blocks, and then assemble them so that we can easily process block by block eccfile.seek(entry_fields["ecc_field_pos"][0]) curpos = file.tell() ecc_curpos = eccfile.tell() while (ecc_curpos < entry_fields["ecc_field_pos"][1]): # continue reading the input file until we reach the position of the previously detected ending marker # Compute the current rate, depending on where we are inside the input file (headers? later stage?) if curpos < header_size or constantmode: # header stage: constant rate rate = resilience_rates[0] else: # later stage 2 or 3: progressive rate rate = feature_scaling(curpos, header_size, entry_fields["filesize"], resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # From the rate, compute the ecc parameters ecc_params = compute_ecc_params(max_block_size, rate, hasher) # Extract the message block from input file, given the computed ecc parameters mes = file.read(ecc_params["message_size"]) if len(mes) == 0: return # quit if message is empty (reached end-of-file), this is a safeguard if ecc pos ending was miscalculated (we thus only need the starting position to be correct) buf = eccfile.read(ecc_params["hash_size"]+ecc_params["ecc_size"]) hash = buf[:ecc_params["hash_size"]] ecc = buf[ecc_params["hash_size"]:] yield {"message": mes, "hash": hash, "ecc": ecc, "rate": rate, "ecc_params": ecc_params, "curpos": curpos, "ecc_curpos": ecc_curpos} # Prepare for the next iteration of the loop curpos = file.tell() ecc_curpos = eccfile.tell() # Just a quick safe guard against ecc ending marker misdetection file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() if curpos < size: print("WARNING: end of ecc track reached but not the end of file! Either the ecc ending marker was misdetected, or either the file hash changed! Some blocks maybe may not have been properly checked!")
python
def stream_entry_assemble(hasher, file, eccfile, entry_fields, max_block_size, header_size, resilience_rates, constantmode=False): '''From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.''' # Cut the header and the ecc entry into blocks, and then assemble them so that we can easily process block by block eccfile.seek(entry_fields["ecc_field_pos"][0]) curpos = file.tell() ecc_curpos = eccfile.tell() while (ecc_curpos < entry_fields["ecc_field_pos"][1]): # continue reading the input file until we reach the position of the previously detected ending marker # Compute the current rate, depending on where we are inside the input file (headers? later stage?) if curpos < header_size or constantmode: # header stage: constant rate rate = resilience_rates[0] else: # later stage 2 or 3: progressive rate rate = feature_scaling(curpos, header_size, entry_fields["filesize"], resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # From the rate, compute the ecc parameters ecc_params = compute_ecc_params(max_block_size, rate, hasher) # Extract the message block from input file, given the computed ecc parameters mes = file.read(ecc_params["message_size"]) if len(mes) == 0: return # quit if message is empty (reached end-of-file), this is a safeguard if ecc pos ending was miscalculated (we thus only need the starting position to be correct) buf = eccfile.read(ecc_params["hash_size"]+ecc_params["ecc_size"]) hash = buf[:ecc_params["hash_size"]] ecc = buf[ecc_params["hash_size"]:] yield {"message": mes, "hash": hash, "ecc": ecc, "rate": rate, "ecc_params": ecc_params, "curpos": curpos, "ecc_curpos": ecc_curpos} # Prepare for the next iteration of the loop curpos = file.tell() ecc_curpos = eccfile.tell() # Just a quick safe guard against ecc ending marker misdetection file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() if curpos < size: print("WARNING: end of ecc track reached but not the end of file! Either the ecc ending marker was misdetected, or either the file hash changed! Some blocks maybe may not have been properly checked!")
From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L137-L165
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
stream_compute_ecc_hash
def stream_compute_ecc_hash(ecc_manager, hasher, file, max_block_size, header_size, resilience_rates): '''Generate a stream of hash/ecc blocks, of variable encoding rate and size, given a file.''' curpos = file.tell() # init the reading cursor at the beginning of the file # Find the total size to know when to stop #size = os.fstat(file.fileno()).st_size # old way of doing it, doesn't work with StringIO objects file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() file.seek(0, curpos) # place the reading cursor back at the beginning of the file # Main encoding loop while curpos < size: # Continue encoding while we do not reach the end of the file # Calculating the encoding rate if curpos < header_size: # if we are still reading the file's header, we use a constant rate rate = resilience_rates[0] else: # else we use a progressive rate for the rest of the file the we calculate on-the-fly depending on our current reading cursor position in the file rate = feature_scaling(curpos, header_size, size, resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # Compute the ecc parameters given the calculated rate ecc_params = compute_ecc_params(max_block_size, rate, hasher) #ecc_manager = ECCMan(max_block_size, ecc_params["message_size"]) # not necessary to create an ecc manager anymore, as it is very costly. Now we can specify a value for k on the fly (tables for all possible values of k are pre-generated in the reed-solomon libraries) # Compute the ecc and hash for the current message block mes = file.read(ecc_params["message_size"]) hash = hasher.hash(mes) ecc = ecc_manager.encode(mes, k=ecc_params["message_size"]) #print("mes %i (%i) - ecc %i (%i) - hash %i (%i)" % (len(mes), message_size, len(ecc), ecc_params["ecc_size"], len(hash), ecc_params["hash_size"])) # DEBUGLINE # Return the result yield [hash, ecc, ecc_params] # Prepare for next iteration curpos = file.tell()
python
def stream_compute_ecc_hash(ecc_manager, hasher, file, max_block_size, header_size, resilience_rates): '''Generate a stream of hash/ecc blocks, of variable encoding rate and size, given a file.''' curpos = file.tell() # init the reading cursor at the beginning of the file # Find the total size to know when to stop #size = os.fstat(file.fileno()).st_size # old way of doing it, doesn't work with StringIO objects file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() file.seek(0, curpos) # place the reading cursor back at the beginning of the file # Main encoding loop while curpos < size: # Continue encoding while we do not reach the end of the file # Calculating the encoding rate if curpos < header_size: # if we are still reading the file's header, we use a constant rate rate = resilience_rates[0] else: # else we use a progressive rate for the rest of the file the we calculate on-the-fly depending on our current reading cursor position in the file rate = feature_scaling(curpos, header_size, size, resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # Compute the ecc parameters given the calculated rate ecc_params = compute_ecc_params(max_block_size, rate, hasher) #ecc_manager = ECCMan(max_block_size, ecc_params["message_size"]) # not necessary to create an ecc manager anymore, as it is very costly. Now we can specify a value for k on the fly (tables for all possible values of k are pre-generated in the reed-solomon libraries) # Compute the ecc and hash for the current message block mes = file.read(ecc_params["message_size"]) hash = hasher.hash(mes) ecc = ecc_manager.encode(mes, k=ecc_params["message_size"]) #print("mes %i (%i) - ecc %i (%i) - hash %i (%i)" % (len(mes), message_size, len(ecc), ecc_params["ecc_size"], len(hash), ecc_params["hash_size"])) # DEBUGLINE # Return the result yield [hash, ecc, ecc_params] # Prepare for next iteration curpos = file.tell()
Generate a stream of hash/ecc blocks, of variable encoding rate and size, given a file.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L167-L196
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
compute_ecc_hash_from_string
def compute_ecc_hash_from_string(string, ecc_manager, hasher, max_block_size, resilience_rate): '''Generate a concatenated string of ecc stream of hash/ecc blocks, of constant encoding rate, given a string. NOTE: resilience_rate here is constant, you need to supply only one rate, between 0.0 and 1.0. The encoding rate will then be constant, like in header_ecc.py.''' fpfile = StringIO(string) ecc_stream = ''.join( [str(x[1]) for x in stream_compute_ecc_hash(ecc_manager, hasher, fpfile, max_block_size, len(string), [resilience_rate])] ) # "hack" the function by tricking it to always use a constant rate, by setting the header_size=len(relfilepath), and supplying the resilience_rate_intra instead of resilience_rate_s1 (the one for header) return ecc_stream
python
def compute_ecc_hash_from_string(string, ecc_manager, hasher, max_block_size, resilience_rate): '''Generate a concatenated string of ecc stream of hash/ecc blocks, of constant encoding rate, given a string. NOTE: resilience_rate here is constant, you need to supply only one rate, between 0.0 and 1.0. The encoding rate will then be constant, like in header_ecc.py.''' fpfile = StringIO(string) ecc_stream = ''.join( [str(x[1]) for x in stream_compute_ecc_hash(ecc_manager, hasher, fpfile, max_block_size, len(string), [resilience_rate])] ) # "hack" the function by tricking it to always use a constant rate, by setting the header_size=len(relfilepath), and supplying the resilience_rate_intra instead of resilience_rate_s1 (the one for header) return ecc_stream
Generate a concatenated string of ecc stream of hash/ecc blocks, of constant encoding rate, given a string. NOTE: resilience_rate here is constant, you need to supply only one rate, between 0.0 and 1.0. The encoding rate will then be constant, like in header_ecc.py.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L198-L203
lrq3000/pyFileFixity
pyFileFixity/structural_adaptive_ecc.py
ecc_correct_intra_stream
def ecc_correct_intra_stream(ecc_manager_intra, ecc_params_intra, hasher_intra, resilience_rate_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False, max_block_size=65535): """ Correct an intra-field with its corresponding intra-ecc if necessary """ # convert strings to StringIO object so that we can trick our ecc reading functions that normally works only on files fpfile = StringIO(field) fpfile_ecc = StringIO(ecc) fpentry_p = {"ecc_field_pos": [0, len(field)]} # create a fake entry_pos so that the ecc reading function works correctly # Prepare variables field_correct = [] # will store each block of the corrected (or already correct) filepath fcorrupted = False # check if field was corrupted fcorrected = True # check if field was corrected (if it was corrupted) errmsg = '' # Decode each block of the filepath for e in stream_entry_assemble(hasher_intra, fpfile, fpfile_ecc, fpentry_p, max_block_size, len(field), [resilience_rate_intra], constantmode=True): # Check if this block of the filepath is OK, if yes then we just copy it over if ecc_manager_intra.check(e["message"], e["ecc"]): field_correct.append(e["message"]) else: # Else this block is corrupted, we will try to fix it using the ecc fcorrupted = True # Repair the message block and the ecc try: repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures) except (ReedSolomonError, RSCodecError), exc: # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files. repaired_block = None repaired_ecc = None errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc) # Check if the block was successfully repaired: if yes then we copy the repaired block... if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc): field_correct.append(repaired_block) else: # ... else it failed, then we copy the original corrupted block and report an error later field_correct.append(e["message"]) fcorrected = False # Join all the blocks into one string to build the final filepath if isinstance(field_correct[0], bytearray): field_correct = [str(x) for x in field_correct] # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str field = ''.join(field_correct) # Report errors return (field, fcorrupted, fcorrected, errmsg)
python
def ecc_correct_intra_stream(ecc_manager_intra, ecc_params_intra, hasher_intra, resilience_rate_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False, max_block_size=65535): """ Correct an intra-field with its corresponding intra-ecc if necessary """ # convert strings to StringIO object so that we can trick our ecc reading functions that normally works only on files fpfile = StringIO(field) fpfile_ecc = StringIO(ecc) fpentry_p = {"ecc_field_pos": [0, len(field)]} # create a fake entry_pos so that the ecc reading function works correctly # Prepare variables field_correct = [] # will store each block of the corrected (or already correct) filepath fcorrupted = False # check if field was corrupted fcorrected = True # check if field was corrected (if it was corrupted) errmsg = '' # Decode each block of the filepath for e in stream_entry_assemble(hasher_intra, fpfile, fpfile_ecc, fpentry_p, max_block_size, len(field), [resilience_rate_intra], constantmode=True): # Check if this block of the filepath is OK, if yes then we just copy it over if ecc_manager_intra.check(e["message"], e["ecc"]): field_correct.append(e["message"]) else: # Else this block is corrupted, we will try to fix it using the ecc fcorrupted = True # Repair the message block and the ecc try: repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures) except (ReedSolomonError, RSCodecError), exc: # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files. repaired_block = None repaired_ecc = None errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc) # Check if the block was successfully repaired: if yes then we copy the repaired block... if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc): field_correct.append(repaired_block) else: # ... else it failed, then we copy the original corrupted block and report an error later field_correct.append(e["message"]) fcorrected = False # Join all the blocks into one string to build the final filepath if isinstance(field_correct[0], bytearray): field_correct = [str(x) for x in field_correct] # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str field = ''.join(field_correct) # Report errors return (field, fcorrupted, fcorrected, errmsg)
Correct an intra-field with its corresponding intra-ecc if necessary
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/structural_adaptive_ecc.py#L205-L240
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
memory_usage
def memory_usage(proc=-1, interval=.1, timeout=None, run_in_place=False): """ Return the memory usage of a process or piece of code Parameters ---------- proc : {int, string, tuple}, optional The process to monitor. Can be given by a PID, by a string containing a filename or by a tuple. The tuple should contain three values (f, args, kw) specifies to run the function f(*args, **kw). Set to -1 (default) for current process. interval : float, optional timeout : float, optional run_in_place : boolean, optional. False by default If False fork the process and retrieve timings from a different process. You shouldn't need to change this unless you are affected by this (http://blog.vene.ro/2012/07/04/on-why-my-memit-fails-on-osx) bug. Returns ------- mm : list of integers, size less than num memory usage, in KB """ ret = [] if timeout is not None: max_iter = timeout / interval elif isinstance(proc, int): # external process and no timeout max_iter = 1 else: # for a Python function wait until it finishes max_iter = float('inf') if str(proc).endswith('.py'): filename = _find_script(proc) with open(filename) as f: proc = f.read() raise NotImplementedError if isinstance(proc, (list, tuple)): if len(proc) == 1: f, args, kw = (proc[0], (), {}) elif len(proc) == 2: f, args, kw = (proc[0], proc[1], {}) elif len(proc) == 3: f, args, kw = (proc[0], proc[1], proc[2]) else: raise ValueError try: import multiprocessing except ImportError: print ('WARNING: cannot import module `multiprocessing`. Forcing to' ' run inplace.') # force inplace run_in_place = True if run_in_place: import threading main_thread = threading.Thread(target=f, args=args, kwargs=kw) else: main_thread = multiprocessing.Process(target=f, args=args, kwargs=kw) i = 0 main_thread.start() pid = getattr(main_thread, 'pid', os.getpid()) while i < max_iter and main_thread.is_alive(): m = _get_memory(pid) ret.append(m) time.sleep(interval) i += 1 main_thread.join() else: # external process if proc == -1: proc = os.getpid() if max_iter == -1: max_iter = 1 for _ in range(max_iter): ret.append(_get_memory(proc)) time.sleep(interval) return ret
python
def memory_usage(proc=-1, interval=.1, timeout=None, run_in_place=False): """ Return the memory usage of a process or piece of code Parameters ---------- proc : {int, string, tuple}, optional The process to monitor. Can be given by a PID, by a string containing a filename or by a tuple. The tuple should contain three values (f, args, kw) specifies to run the function f(*args, **kw). Set to -1 (default) for current process. interval : float, optional timeout : float, optional run_in_place : boolean, optional. False by default If False fork the process and retrieve timings from a different process. You shouldn't need to change this unless you are affected by this (http://blog.vene.ro/2012/07/04/on-why-my-memit-fails-on-osx) bug. Returns ------- mm : list of integers, size less than num memory usage, in KB """ ret = [] if timeout is not None: max_iter = timeout / interval elif isinstance(proc, int): # external process and no timeout max_iter = 1 else: # for a Python function wait until it finishes max_iter = float('inf') if str(proc).endswith('.py'): filename = _find_script(proc) with open(filename) as f: proc = f.read() raise NotImplementedError if isinstance(proc, (list, tuple)): if len(proc) == 1: f, args, kw = (proc[0], (), {}) elif len(proc) == 2: f, args, kw = (proc[0], proc[1], {}) elif len(proc) == 3: f, args, kw = (proc[0], proc[1], proc[2]) else: raise ValueError try: import multiprocessing except ImportError: print ('WARNING: cannot import module `multiprocessing`. Forcing to' ' run inplace.') # force inplace run_in_place = True if run_in_place: import threading main_thread = threading.Thread(target=f, args=args, kwargs=kw) else: main_thread = multiprocessing.Process(target=f, args=args, kwargs=kw) i = 0 main_thread.start() pid = getattr(main_thread, 'pid', os.getpid()) while i < max_iter and main_thread.is_alive(): m = _get_memory(pid) ret.append(m) time.sleep(interval) i += 1 main_thread.join() else: # external process if proc == -1: proc = os.getpid() if max_iter == -1: max_iter = 1 for _ in range(max_iter): ret.append(_get_memory(proc)) time.sleep(interval) return ret
Return the memory usage of a process or piece of code Parameters ---------- proc : {int, string, tuple}, optional The process to monitor. Can be given by a PID, by a string containing a filename or by a tuple. The tuple should contain three values (f, args, kw) specifies to run the function f(*args, **kw). Set to -1 (default) for current process. interval : float, optional timeout : float, optional run_in_place : boolean, optional. False by default If False fork the process and retrieve timings from a different process. You shouldn't need to change this unless you are affected by this (http://blog.vene.ro/2012/07/04/on-why-my-memit-fails-on-osx) bug. Returns ------- mm : list of integers, size less than num memory usage, in KB
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L49-L134
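A hedged usage sketch of the (f, args, kw) tuple form described above. The import path assumes this bundled module is importable as memory_profiler, and that _get_memory can read process memory on your platform; adjust both to your layout.

import time
from memory_profiler import memory_usage

def allocate():
    data = [0] * (10 ** 6)
    time.sleep(0.5)  # keep the child process alive long enough to be sampled
    return len(data)

if __name__ == "__main__":
    samples = memory_usage((allocate, (), {}), interval=0.1)
    print("peak reading:", max(samples) if samples else "no samples collected")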
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
_find_script
def _find_script(script_name): """ Find the script. If the input is not a file, then $PATH will be searched. """ if os.path.isfile(script_name): return script_name path = os.getenv('PATH', os.defpath).split(os.pathsep) for dir in path: if dir == '': continue fn = os.path.join(dir, script_name) if os.path.isfile(fn): return fn print >> sys.stderr, 'Could not find script {0}'.format(script_name) raise SystemExit(1)
python
def _find_script(script_name): """ Find the script. If the input is not a file, then $PATH will be searched. """ if os.path.isfile(script_name): return script_name path = os.getenv('PATH', os.defpath).split(os.pathsep) for dir in path: if dir == '': continue fn = os.path.join(dir, script_name) if os.path.isfile(fn): return fn print >> sys.stderr, 'Could not find script {0}'.format(script_name) raise SystemExit(1)
Find the script. If the input is not a file, then $PATH will be searched.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L140-L156
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
magic_memit
def magic_memit(self, line=''): """Measure memory usage of a Python statement Usage, in line mode: %memit [-ir<R>t<T>] statement Options: -r<R>: repeat the loop iteration <R> times and take the best result. Default: 1 -i: run the code in the current environment, without forking a new process. This is required on some MacOS versions of Accelerate if your line contains a call to `np.dot`. -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None Examples -------- :: In [1]: import numpy as np In [2]: %memit np.zeros(1e7) maximum of 1: 76.402344 MB per loop In [3]: %memit np.ones(1e6) maximum of 1: 7.820312 MB per loop In [4]: %memit -r 10 np.empty(1e8) maximum of 10: 0.101562 MB per loop In [5]: memit -t 3 while True: pass; Subprocess timed out. Subprocess timed out. Subprocess timed out. ERROR: all subprocesses exited unsuccessfully. Try again with the `-i` option. maximum of 1: -inf MB per loop """ opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False) repeat = int(getattr(opts, 'r', 1)) if repeat < 1: repeat == 1 timeout = int(getattr(opts, 't', 0)) if timeout <= 0: timeout = None run_in_place = hasattr(opts, 'i') mem_usage = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout, run_in_place=run_in_place) if mem_usage: print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage))) else: print('ERROR: could not read memory usage, try with a lower interval or more iterations')
python
def magic_memit(self, line=''): """Measure memory usage of a Python statement Usage, in line mode: %memit [-ir<R>t<T>] statement Options: -r<R>: repeat the loop iteration <R> times and take the best result. Default: 1 -i: run the code in the current environment, without forking a new process. This is required on some MacOS versions of Accelerate if your line contains a call to `np.dot`. -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None Examples -------- :: In [1]: import numpy as np In [2]: %memit np.zeros(1e7) maximum of 1: 76.402344 MB per loop In [3]: %memit np.ones(1e6) maximum of 1: 7.820312 MB per loop In [4]: %memit -r 10 np.empty(1e8) maximum of 10: 0.101562 MB per loop In [5]: memit -t 3 while True: pass; Subprocess timed out. Subprocess timed out. Subprocess timed out. ERROR: all subprocesses exited unsuccessfully. Try again with the `-i` option. maximum of 1: -inf MB per loop """ opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False) repeat = int(getattr(opts, 'r', 1)) if repeat < 1: repeat == 1 timeout = int(getattr(opts, 't', 0)) if timeout <= 0: timeout = None run_in_place = hasattr(opts, 'i') mem_usage = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout, run_in_place=run_in_place) if mem_usage: print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage))) else: print('ERROR: could not read memory usage, try with a lower interval or more iterations')
Measure memory usage of a Python statement Usage, in line mode: %memit [-ir<R>t<T>] statement Options: -r<R>: repeat the loop iteration <R> times and take the best result. Default: 1 -i: run the code in the current environment, without forking a new process. This is required on some MacOS versions of Accelerate if your line contains a call to `np.dot`. -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None Examples -------- :: In [1]: import numpy as np In [2]: %memit np.zeros(1e7) maximum of 1: 76.402344 MB per loop In [3]: %memit np.ones(1e6) maximum of 1: 7.820312 MB per loop In [4]: %memit -r 10 np.empty(1e8) maximum of 10: 0.101562 MB per loop In [5]: memit -t 3 while True: pass; Subprocess timed out. Subprocess timed out. Subprocess timed out. ERROR: all subprocesses exited unsuccessfully. Try again with the `-i` option. maximum of 1: -inf MB per loop
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L483-L538
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.add_function
def add_function(self, func): """ Record line profiling information for the given Python function. """ try: # func_code does not exist in Python3 code = func.__code__ except AttributeError: import warnings warnings.warn("Could not extract a code object for the object %r" % (func,)) return if code not in self.code_map: self.code_map[code] = {} self.functions.append(func)
python
def add_function(self, func): """ Record line profiling information for the given Python function. """ try: # func_code does not exist in Python3 code = func.__code__ except AttributeError: import warnings warnings.warn("Could not extract a code object for the object %r" % (func,)) return if code not in self.code_map: self.code_map[code] = {} self.functions.append(func)
Record line profiling information for the given Python function.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L177-L190
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.wrap_function
def wrap_function(self, func): """ Wrap a function to profile it. """ def f(*args, **kwds): self.enable_by_count() try: result = func(*args, **kwds) finally: self.disable_by_count() return result return f
python
def wrap_function(self, func): """ Wrap a function to profile it. """ def f(*args, **kwds): self.enable_by_count() try: result = func(*args, **kwds) finally: self.disable_by_count() return result return f
Wrap a function to profile it.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L192-L203
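A hedged sketch of the add_function/wrap_function pattern: register the target's code object, then call the wrapped function so tracing is only enabled around the call. The import path is an assumption (adapt it to wherever the bundled module lives), and memory readings require a platform supported by _get_memory.

from memory_profiler import LineProfiler

def work(n):
    data = [0] * n
    return sum(data)

profiler = LineProfiler()
profiler.add_function(work)
wrapped = profiler.wrap_function(work)
wrapped(100000)
print(sorted(profiler.code_map[work.__code__].keys()))  # line numbers that were sampled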
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.runctx
def runctx(self, cmd, globals, locals): """ Profile a single executable statement in the given namespaces. """ self.enable_by_count() try: exec(cmd, globals, locals) finally: self.disable_by_count() return self
python
def runctx(self, cmd, globals, locals): """ Profile a single executable statement in the given namespaces. """ self.enable_by_count() try: exec(cmd, globals, locals) finally: self.disable_by_count() return self
Profile a single executable statement in the given namespaces.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L212-L220
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.runcall
def runcall(self, func, *args, **kw): """ Profile a single function call. """ # XXX where is this used ? can be removed ? self.enable_by_count() try: return func(*args, **kw) finally: self.disable_by_count()
python
def runcall(self, func, *args, **kw): """ Profile a single function call. """ # XXX where is this used ? can be removed ? self.enable_by_count() try: return func(*args, **kw) finally: self.disable_by_count()
Profile a single function call.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L222-L230
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.disable_by_count
def disable_by_count(self): """ Disable the profiler if the number of disable requests matches the number of enable requests. """ if self.enable_count > 0: self.enable_count -= 1 if self.enable_count == 0: self.disable()
python
def disable_by_count(self): """ Disable the profiler if the number of disable requests matches the number of enable requests. """ if self.enable_count > 0: self.enable_count -= 1 if self.enable_count == 0: self.disable()
Disable the profiler if the number of disable requests matches the number of enable requests.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L239-L246
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
LineProfiler.trace_memory_usage
def trace_memory_usage(self, frame, event, arg): """Callback for sys.settrace""" if event in ('line', 'return') and frame.f_code in self.code_map: lineno = frame.f_lineno if event == 'return': lineno += 1 entry = self.code_map[frame.f_code].setdefault(lineno, []) entry.append(_get_memory(os.getpid())) return self.trace_memory_usage
python
def trace_memory_usage(self, frame, event, arg): """Callback for sys.settrace""" if event in ('line', 'return') and frame.f_code in self.code_map: lineno = frame.f_lineno if event == 'return': lineno += 1 entry = self.code_map[frame.f_code].setdefault(lineno, []) entry.append(_get_memory(os.getpid())) return self.trace_memory_usage
Callback for sys.settrace
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L248-L257
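A stripped-down sys.settrace callback in the same spirit, counting line hits instead of sampling memory. This is a standalone illustration of the tracing mechanism, not library code.

import sys

line_counts = {}

def tracer(frame, event, arg):
    if event == 'line':
        key = (frame.f_code.co_name, frame.f_lineno)
        line_counts[key] = line_counts.get(key, 0) + 1
    return tracer  # keep local tracing enabled for this frame

def demo():
    total = 0
    for i in range(3):
        total += i
    return total

sys.settrace(tracer)
demo()
sys.settrace(None)
print(line_counts)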
lrq3000/pyFileFixity
setup.py
parse_makefile_aliases
def parse_makefile_aliases(filepath): ''' Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias. ''' # -- Parsing the Makefile using ConfigParser # Adding a fake section to make the Makefile a valid Ini file ini_str = '[root]\n' with open(filepath, 'r') as fd: ini_str = ini_str + fd.read().replace('@make ', '') ini_fp = StringIO.StringIO(ini_str) # Parse using ConfigParser config = ConfigParser.RawConfigParser() config.readfp(ini_fp) # Fetch the list of aliases aliases = config.options('root') # -- Extracting commands for each alias commands = {} for alias in aliases: # strip the first line return, and then split by any line return commands[alias] = config.get('root', alias).lstrip('\n').split('\n') # -- Commands substitution # Loop until all aliases are substituted by their commands: # Check each command of each alias, and if there is one command that is to # be substituted by an alias, try to do it right away. If this is not # possible because this alias itself points to other aliases , then stop # and put the current alias back in the queue to be processed again later. # Create the queue of aliases to process aliases_todo = commands.keys() # Create the dict that will hold the full commands commands_new = {} # Loop until we have processed all aliases while aliases_todo: # Pick the first alias in the queue alias = aliases_todo.pop(0) # Create a new entry in the resulting dict commands_new[alias] = [] # For each command of this alias for cmd in commands[alias]: # Ignore self-referencing (alias points to itself) if cmd == alias: pass # Substitute full command elif cmd in aliases and cmd in commands_new: # Append all the commands referenced by the alias commands_new[alias].extend(commands_new[cmd]) # Delay substituting another alias, waiting for the other alias to # be substituted first elif cmd in aliases and cmd not in commands_new: # Delete the current entry to avoid other aliases # to reference this one wrongly (as it is empty) del commands_new[alias] aliases_todo.append(alias) break # Full command (no aliases) else: commands_new[alias].append(cmd) commands = commands_new del commands_new # -- Prepending prefix to avoid conflicts with standard setup.py commands # for alias in commands.keys(): # commands['make_'+alias] = commands[alias] # del commands[alias] return commands
python
def parse_makefile_aliases(filepath): ''' Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias. ''' # -- Parsing the Makefile using ConfigParser # Adding a fake section to make the Makefile a valid Ini file ini_str = '[root]\n' with open(filepath, 'r') as fd: ini_str = ini_str + fd.read().replace('@make ', '') ini_fp = StringIO.StringIO(ini_str) # Parse using ConfigParser config = ConfigParser.RawConfigParser() config.readfp(ini_fp) # Fetch the list of aliases aliases = config.options('root') # -- Extracting commands for each alias commands = {} for alias in aliases: # strip the first line return, and then split by any line return commands[alias] = config.get('root', alias).lstrip('\n').split('\n') # -- Commands substitution # Loop until all aliases are substituted by their commands: # Check each command of each alias, and if there is one command that is to # be substituted by an alias, try to do it right away. If this is not # possible because this alias itself points to other aliases , then stop # and put the current alias back in the queue to be processed again later. # Create the queue of aliases to process aliases_todo = commands.keys() # Create the dict that will hold the full commands commands_new = {} # Loop until we have processed all aliases while aliases_todo: # Pick the first alias in the queue alias = aliases_todo.pop(0) # Create a new entry in the resulting dict commands_new[alias] = [] # For each command of this alias for cmd in commands[alias]: # Ignore self-referencing (alias points to itself) if cmd == alias: pass # Substitute full command elif cmd in aliases and cmd in commands_new: # Append all the commands referenced by the alias commands_new[alias].extend(commands_new[cmd]) # Delay substituting another alias, waiting for the other alias to # be substituted first elif cmd in aliases and cmd not in commands_new: # Delete the current entry to avoid other aliases # to reference this one wrongly (as it is empty) del commands_new[alias] aliases_todo.append(alias) break # Full command (no aliases) else: commands_new[alias].append(cmd) commands = commands_new del commands_new # -- Prepending prefix to avoid conflicts with standard setup.py commands # for alias in commands.keys(): # commands['make_'+alias] = commands[alias] # del commands[alias] return commands
Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/setup.py#L30-L101
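A toy illustration of the fake-[root]-section trick used above, written with the Python 3 module names (the code above uses the Python 2 ConfigParser/StringIO equivalents). The Makefile content is made up.

import configparser, io

makefile = "all:\n\ttest\n\tbuild\ntest:\n\tpytest\nbuild:\n\tpython setup.py sdist\n"
config = configparser.RawConfigParser()
config.read_file(io.StringIO("[root]\n" + makefile))
for alias in config.options('root'):
    print(alias, config.get('root', alias).lstrip('\n').split('\n'))
# all ['test', 'build']
# test ['pytest']
# build ['python setup.py sdist']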
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/kthread.py
KThread.start
def start(self): """Start the thread.""" self.__run_backup = self.run self.run = self.__run # Force the Thread to install our trace. threading.Thread.start(self)
python
def start(self): """Start the thread.""" self.__run_backup = self.run self.run = self.__run # Force the Thread to install our trace. threading.Thread.start(self)
Start the thread.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/kthread.py#L43-L47
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/kthread.py
KThread.__run
def __run(self): """Hacked run function, which installs the trace.""" sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup
python
def __run(self): """Hacked run function, which installs the trace.""" sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup
Hacked run function, which installs the trace.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/kthread.py#L49-L53
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/mprofile.py
MProfiler.codepoint_included
def codepoint_included(self, codepoint): """Check if codepoint matches any of the defined codepoints.""" if self.codepoints == None: return True for cp in self.codepoints: mismatch = False for i in range(len(cp)): if (cp[i] is not None) and (cp[i] != codepoint[i]): mismatch = True break if not mismatch: return True return False
python
def codepoint_included(self, codepoint): """Check if codepoint matches any of the defined codepoints.""" if self.codepoints == None: return True for cp in self.codepoints: mismatch = False for i in range(len(cp)): if (cp[i] is not None) and (cp[i] != codepoint[i]): mismatch = True break if not mismatch: return True return False
Check if codepoint matches any of the defined codepoints.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/mprofile.py#L49-L61
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/mprofile.py
MProfiler.profile
def profile(self, frame, event, arg): #PYCHOK arg requ. to match signature """Profiling method used to profile matching codepoints and events.""" if (self.events == None) or (event in self.events): frame_info = inspect.getframeinfo(frame) cp = (frame_info[0], frame_info[2], frame_info[1]) if self.codepoint_included(cp): objects = muppy.get_objects() size = muppy.get_size(objects) if cp not in self.memories: self.memories[cp] = [0,0,0,0] self.memories[cp][0] = 1 self.memories[cp][1] = size self.memories[cp][2] = size else: self.memories[cp][0] += 1 if self.memories[cp][1] > size: self.memories[cp][1] = size if self.memories[cp][2] < size: self.memories[cp][2] = size
python
def profile(self, frame, event, arg): #PYCHOK arg requ. to match signature """Profiling method used to profile matching codepoints and events.""" if (self.events == None) or (event in self.events): frame_info = inspect.getframeinfo(frame) cp = (frame_info[0], frame_info[2], frame_info[1]) if self.codepoint_included(cp): objects = muppy.get_objects() size = muppy.get_size(objects) if cp not in self.memories: self.memories[cp] = [0,0,0,0] self.memories[cp][0] = 1 self.memories[cp][1] = size self.memories[cp][2] = size else: self.memories[cp][0] += 1 if self.memories[cp][1] > size: self.memories[cp][1] = size if self.memories[cp][2] < size: self.memories[cp][2] = size
Profiling method used to profile matching codepoints and events.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/mprofile.py#L63-L81
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
runprofile
def runprofile(mainfunction, output, timeout = 0, calibrate=False): ''' Run the functions profiler and save the result If timeout is greater than 0, the profile will automatically stops after timeout seconds ''' if noprofiler == True: print('ERROR: profiler and/or pstats library missing ! Please install it (probably package named python-profile) before running a profiling !') return False # This is the main function for profiling def _profile(): profile.run(mainfunction, output) print('=> RUNNING FUNCTIONS PROFILER\n\n'); sys.stdout.flush(); # Calibrate the profiler (only use this if the profiler produces some funny stuff, but calibration can also produce even more funny stuff with the latest cProfile of Python v2.7! So you should only enable calibration if necessary) if calibrate: print('Calibrating the profiler...'); sys.stdout.flush(); cval = calibrateprofile() print('Calibration found value : %s' % cval); sys.stdout.flush(); print('Initializing the profiler...'); sys.stdout.flush(); # Run in timeout mode (if the function cannot ends by itself, this is the best mode: the function must ends for the profile to be saved) if timeout > 0: pthread = KThread(target=_profile) # we open the function with the profiler, in a special killable thread (see below why) print('Will now run the profiling and terminate it in %s seconds. Results will be saved in %s' % (str(timeout), str(output))); sys.stdout.flush(); print('\nCountdown:'); sys.stdout.flush(); for i in range(0,5): print(str(5-i)) sys.stdout.flush() time.sleep(1) print('0\nStarting to profile...'); sys.stdout.flush(); pthread.start() # starting the thread time.sleep(float(timeout)) # after this amount of seconds, the thread gets killed and the profiler will end its job print('\n\nFinishing the profile and saving to the file %s' % str(output)); sys.stdout.flush(); pthread.kill() # we must end the main function in order for the profiler to output its results (if we didn't launch a thread and just closed the process, it would have done no result) # Run in full length mode (we run the function until it ends) else: print("Running the profiler, please wait until the process terminates by itself (if you forcequit before, the profile won't be saved)") _profile() print('=> Functions Profile done !') return True
python
def runprofile(mainfunction, output, timeout = 0, calibrate=False): ''' Run the functions profiler and save the result If timeout is greater than 0, the profile will automatically stops after timeout seconds ''' if noprofiler == True: print('ERROR: profiler and/or pstats library missing ! Please install it (probably package named python-profile) before running a profiling !') return False # This is the main function for profiling def _profile(): profile.run(mainfunction, output) print('=> RUNNING FUNCTIONS PROFILER\n\n'); sys.stdout.flush(); # Calibrate the profiler (only use this if the profiler produces some funny stuff, but calibration can also produce even more funny stuff with the latest cProfile of Python v2.7! So you should only enable calibration if necessary) if calibrate: print('Calibrating the profiler...'); sys.stdout.flush(); cval = calibrateprofile() print('Calibration found value : %s' % cval); sys.stdout.flush(); print('Initializing the profiler...'); sys.stdout.flush(); # Run in timeout mode (if the function cannot ends by itself, this is the best mode: the function must ends for the profile to be saved) if timeout > 0: pthread = KThread(target=_profile) # we open the function with the profiler, in a special killable thread (see below why) print('Will now run the profiling and terminate it in %s seconds. Results will be saved in %s' % (str(timeout), str(output))); sys.stdout.flush(); print('\nCountdown:'); sys.stdout.flush(); for i in range(0,5): print(str(5-i)) sys.stdout.flush() time.sleep(1) print('0\nStarting to profile...'); sys.stdout.flush(); pthread.start() # starting the thread time.sleep(float(timeout)) # after this amount of seconds, the thread gets killed and the profiler will end its job print('\n\nFinishing the profile and saving to the file %s' % str(output)); sys.stdout.flush(); pthread.kill() # we must end the main function in order for the profiler to output its results (if we didn't launch a thread and just closed the process, it would have done no result) # Run in full length mode (we run the function until it ends) else: print("Running the profiler, please wait until the process terminates by itself (if you forcequit before, the profile won't be saved)") _profile() print('=> Functions Profile done !') return True
Run the functions profiler and save the result. If timeout is greater than 0, the profiling will automatically stop after timeout seconds
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L52-L89
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
calibrateprofile
def calibrateprofile(): ''' Calibrate the profiler (necessary to have non negative and more exact values) ''' pr = profile.Profile() calib = [] crepeat = 10 for i in range(crepeat): calib.append(pr.calibrate(10000)) final = sum(calib) / crepeat profile.Profile.bias = final # Apply computed bias to all Profile instances created hereafter return final
python
def calibrateprofile():
    ''' Calibrate the profiler (necessary to have non negative and more exact values) '''
    pr = profile.Profile()
    calib = []
    crepeat = 10
    for i in range(crepeat):
        calib.append(pr.calibrate(10000))
    final = sum(calib) / crepeat
    profile.Profile.bias = final # Apply computed bias to all Profile instances created hereafter
    return final
Calibrate the profiler (necessary to have non negative and more exact values)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L91-L102
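For reference, the same bias-calibration idea can be sketched with only the standard-library profile module (the repeat counts below are arbitrary choices, not taken from the source):

import profile

pr = profile.Profile()
# Average a few calibration runs and install the bias on the class,
# mirroring what calibrateprofile() does above.
bias = sum(pr.calibrate(10000) for _ in range(5)) / 5
profile.Profile.bias = bias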
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
parseprofile
def parseprofile(profilelog, out):
    ''' Parse a profile log and print the result on screen '''
    file = open(out, 'w') # opening the output file
    print('Opening the profile in %s...' % profilelog)
    p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file
    print('Generating the stats, please wait...')
    file.write("=== All stats:\n")
    p.strip_dirs().sort_stats(-1).print_stats()
    file.write("=== Cumulative time:\n")
    p.sort_stats('cumulative').print_stats(100)
    file.write("=== Time:\n")
    p.sort_stats('time').print_stats(100)
    file.write("=== Time + cumulative time:\n")
    p.sort_stats('time', 'cum').print_stats(.5, 'init')
    file.write("=== Callees:\n")
    p.print_callees()
    file.write("=== Callers:\n")
    p.print_callers()
    #p.print_callers(.5, 'init')
    #p.add('fooprof')
    file.close()
    print('Stats generated and saved to %s.' % out)
    print('Everything is done. Exiting')
python
def parseprofile(profilelog, out):
    ''' Parse a profile log and print the result on screen '''
    file = open(out, 'w') # opening the output file
    print('Opening the profile in %s...' % profilelog)
    p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file
    print('Generating the stats, please wait...')
    file.write("=== All stats:\n")
    p.strip_dirs().sort_stats(-1).print_stats()
    file.write("=== Cumulative time:\n")
    p.sort_stats('cumulative').print_stats(100)
    file.write("=== Time:\n")
    p.sort_stats('time').print_stats(100)
    file.write("=== Time + cumulative time:\n")
    p.sort_stats('time', 'cum').print_stats(.5, 'init')
    file.write("=== Callees:\n")
    p.print_callees()
    file.write("=== Callers:\n")
    p.print_callers()
    #p.print_callers(.5, 'init')
    #p.add('fooprof')
    file.close()
    print('Stats generated and saved to %s.' % out)
    print('Everything is done. Exiting')
Parse a profile log and print the result on screen
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L104-L129
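A standard-library sketch of the same workflow, generating a dump with cProfile and summarizing it with pstats (file names and the profiled statement are placeholders):

import cProfile, pstats

cProfile.run('sum(x * x for x in range(10**6))', 'demo.prof')  # any statement string works
with open('demo_stats.txt', 'w') as out:
    stats = pstats.Stats('demo.prof', stream=out)
    stats.strip_dirs().sort_stats('cumulative').print_stats(20)
    stats.print_callers()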
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
browseprofile
def browseprofile(profilelog):
    ''' Browse interactively a profile log in console '''
    print('Starting the pstats profile browser...\n')
    try:
        browser = ProfileBrowser(profilelog)
        print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started."
        browser.cmdloop()
        print >> browser.stream, "Goodbye."
    except KeyboardInterrupt:
        pass
python
def browseprofile(profilelog):
    ''' Browse interactively a profile log in console '''
    print('Starting the pstats profile browser...\n')
    try:
        browser = ProfileBrowser(profilelog)
        print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started."
        browser.cmdloop()
        print >> browser.stream, "Goodbye."
    except KeyboardInterrupt:
        pass
Browse interactively a profile log in console
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L131-L142
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
browseprofilegui
def browseprofilegui(profilelog):
    ''' Browse interactively a profile log in GUI using RunSnakeRun and SquareMap '''
    from runsnakerun import runsnake # runsnakerun needs wxPython lib, if it's not available then we can pass if we don't want a GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (and you can still use pstats for console browsing)
    app = runsnake.RunSnakeRunApp(0)
    app.OnInit(profilelog)
    #app.OnInit()
    app.MainLoop()
python
def browseprofilegui(profilelog):
    ''' Browse interactively a profile log in GUI using RunSnakeRun and SquareMap '''
    from runsnakerun import runsnake # runsnakerun needs wxPython lib, if it's not available then we can pass if we don't want a GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (and you can still use pstats for console browsing)
    app = runsnake.RunSnakeRunApp(0)
    app.OnInit(profilelog)
    #app.OnInit()
    app.MainLoop()
Browse interactively a profile log in GUI using RunSnakeRun and SquareMap
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L144-L152
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
rwh_primes1
def rwh_primes1(n):
    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    ''' Returns a list of primes < n '''
    sieve = [True] * (n/2)
    for i in _range(3,int(n**0.5)+1,2):
        if sieve[i/2]:
            sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)
    return [2] + [2*i+1 for i in _range(1,n/2) if sieve[i]]
python
def rwh_primes1(n):
    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    ''' Returns a list of primes < n '''
    sieve = [True] * (n/2)
    for i in _range(3,int(n**0.5)+1,2):
        if sieve[i/2]:
            sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)
    return [2] + [2*i+1 for i in _range(1,n/2) if sieve[i]]
Returns a list of primes < n
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L60-L67
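The sieve above relies on Python 2 integer division (n/2 and friends); a Python 3 port only needs floor division. A sketch with a small self-check:

def rwh_primes1_py3(n):
    '''Sieve of Eratosthenes over odd numbers; returns primes < n (Python 3 port, // instead of /).'''
    sieve = [True] * (n // 2)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n // 2) if sieve[i]]

assert rwh_primes1_py3(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]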
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
find_prime_polynomials
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
    '''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
    # fast_primes will output less results but will be significantly faster.
    # single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.

    # A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
    # Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
    # For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be, in concrete values, 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
    # Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.

    # Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
    # Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
    # Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.

    # Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
    root_charac = 2 # we're in GF(2)
    field_charac = int(root_charac**c_exp - 1)
    field_charac_next = int(root_charac**(c_exp+1) - 1)

    prim_candidates = []
    if fast_primes:
        prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
        prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
    else:
        prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)

    # Start of the main loop
    correct_primes = []
    for prim in prim_candidates: # try potential candidates primitive irreducible polys
        seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
        conflict = False # flag to know if there was at least one conflict

        # Second loop, build the whole Galois Field
        x = GF2int(1)
        for i in _range(field_charac):
            # Compute the next value in the field (ie, the next power of alpha/generator)
            x = x.multiply(generator, prim, field_charac+1)

            # Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
            if x > field_charac or seen[x] == 1:
                conflict = True
                break
            # Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
            else:
                seen[x] = 1

        # End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
        if not conflict:
            correct_primes.append(prim)
            if single: return prim

    # Return the list of all prime polynomials
    return correct_primes
python
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
    '''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
    # fast_primes will output less results but will be significantly faster.
    # single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.

    # A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
    # Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
    # For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be, in concrete values, 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
    # Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.

    # Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
    # Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
    # Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.

    # Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
    root_charac = 2 # we're in GF(2)
    field_charac = int(root_charac**c_exp - 1)
    field_charac_next = int(root_charac**(c_exp+1) - 1)

    prim_candidates = []
    if fast_primes:
        prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
        prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
    else:
        prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)

    # Start of the main loop
    correct_primes = []
    for prim in prim_candidates: # try potential candidates primitive irreducible polys
        seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
        conflict = False # flag to know if there was at least one conflict

        # Second loop, build the whole Galois Field
        x = GF2int(1)
        for i in _range(field_charac):
            # Compute the next value in the field (ie, the next power of alpha/generator)
            x = x.multiply(generator, prim, field_charac+1)

            # Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
            if x > field_charac or seen[x] == 1:
                conflict = True
                break
            # Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
            else:
                seen[x] = 1

        # End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
        if not conflict:
            correct_primes.append(prim)
            if single: return prim

    # Return the list of all prime polynomials
    return correct_primes
Compute the list of prime polynomials for the given generator and galois field characteristic exponent.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L69-L121
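A usage sketch (the import path is an assumption): enumerate the prime polynomials of GF(2^8) for generator 2, or stop at the first one with single=True; the widely used 0x11d (x^8 + x^4 + x^3 + x^2 + 1) should appear in the list.

from pyFileFixity.lib.brownanrs import ff   # assumed import path

primes = ff.find_prime_polynomials(generator=2, c_exp=8)
print([hex(p) for p in primes])             # 0x11d is expected among the results
first = ff.find_prime_polynomials(generator=2, c_exp=8, single=True)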
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
init_lut
def init_lut(generator=3, prim=0x11b, c_exp=8):
    '''Precompute the logarithm and anti-log (look-up) tables for faster computation later, using the provided primitive polynomial.
    These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2.
    The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
    That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
    For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
    '''
    # generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books.
    # prim is the primitive/prime (binary) polynomial and must be irreducible (it can't represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between 0 and 255, and not a list of gf values. For more infos: http://research.swtch.com/field
    # note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables).
    global GF2int_exptable, GF2int_logtable, GF2_charac, GF2_c_exp
    GF2_charac = int(2**c_exp - 1)
    GF2_c_exp = int(c_exp)
    exptable = [-1] * (GF2_charac+1) # anti-log (exponential) table. The first two elements will always be [GF2int(1), generator]
    logtable = [-1] * (GF2_charac+1) # log table, log[0] is impossible and thus unused

    # Construct the anti-log table
    # It's basically the cumulative product of 1 by the generator number, on and on and on until you have walked through the whole field.
    # That's why exptable is always dense (all entries are filled), but logtable may be sparse (lots of empty values, because multiple logtable's entries point to the same exptable's entry).
    g = GF2int(1)
    for i in range(GF2_charac+1): # note that the last item of exptable will always be equal to the first item in the table, because g^p==g^0 because of the modulo p (we're in a finite field!).
        exptable[i] = g # compute anti-log for this value and store it in a table
        #logtable[g] = i # compute logtable at the same time as exptable (but log[1] will always be equal to g^255, which may be weird when compared to lists of logtables online but this is equivalent)
        g = g.multiply(generator, prim, GF2_charac+1) # equivalent to: g = generator**(i+1)

    # Construct the log table
    # Ignore the last element of the field because fields wrap back around.
    # The log of 1 can have two values: either g^0 (the exact value change depending on parameters) or it could be 255 (g^255=1) because of the wraparound
    # Note that either way, this does not change anything any output later (ie, the ecc symbols will be the same either way).
    for i, x in enumerate(exptable[:-1]):
        logtable[x] = i

    # Optimization: convert to integer arrays
    GF2int_exptable = array.array('i', exptable)
    GF2int_logtable = array.array('i', logtable)
    return GF2int_exptable, GF2int_logtable
python
def init_lut(generator=3, prim=0x11b, c_exp=8):
    '''Precompute the logarithm and anti-log (look-up) tables for faster computation later, using the provided primitive polynomial.
    These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2.
    The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
    That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
    For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
    '''
    # generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books.
    # prim is the primitive/prime (binary) polynomial and must be irreducible (it can't represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between 0 and 255, and not a list of gf values. For more infos: http://research.swtch.com/field
    # note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables).
    global GF2int_exptable, GF2int_logtable, GF2_charac, GF2_c_exp
    GF2_charac = int(2**c_exp - 1)
    GF2_c_exp = int(c_exp)
    exptable = [-1] * (GF2_charac+1) # anti-log (exponential) table. The first two elements will always be [GF2int(1), generator]
    logtable = [-1] * (GF2_charac+1) # log table, log[0] is impossible and thus unused

    # Construct the anti-log table
    # It's basically the cumulative product of 1 by the generator number, on and on and on until you have walked through the whole field.
    # That's why exptable is always dense (all entries are filled), but logtable may be sparse (lots of empty values, because multiple logtable's entries point to the same exptable's entry).
    g = GF2int(1)
    for i in range(GF2_charac+1): # note that the last item of exptable will always be equal to the first item in the table, because g^p==g^0 because of the modulo p (we're in a finite field!).
        exptable[i] = g # compute anti-log for this value and store it in a table
        #logtable[g] = i # compute logtable at the same time as exptable (but log[1] will always be equal to g^255, which may be weird when compared to lists of logtables online but this is equivalent)
        g = g.multiply(generator, prim, GF2_charac+1) # equivalent to: g = generator**(i+1)

    # Construct the log table
    # Ignore the last element of the field because fields wrap back around.
    # The log of 1 can have two values: either g^0 (the exact value change depending on parameters) or it could be 255 (g^255=1) because of the wraparound
    # Note that either way, this does not change anything any output later (ie, the ecc symbols will be the same either way).
    for i, x in enumerate(exptable[:-1]):
        logtable[x] = i

    # Optimization: convert to integer arrays
    GF2int_exptable = array.array('i', exptable)
    GF2int_logtable = array.array('i', logtable)
    return GF2int_exptable, GF2int_logtable
Precompute the logarithm and anti-log (look-up) tables for faster computation later, using the provided primitive polynomial. These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2. The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y. That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables. For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L123-L159
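With the tables in place, a product reduces to one integer addition of logarithms modulo 2^8 - 1 and one exponential lookup. A self-contained sketch of that trick (tables rebuilt locally with a Russian-peasant multiply so the snippet does not depend on GF2int; the parameters assume GF(2^8)):

def build_tables(generator=3, prim=0x11b, charac=255):
    '''Build GF(2^8) exp/log tables, mirroring init_lut above.'''
    exp = [0] * (charac + 1)
    log = [0] * (charac + 1)
    x = 1
    for i in range(charac):
        exp[i] = x
        log[x] = i
        # next power of the generator: carry-less multiply + modular reduction by prim
        r, a, b = 0, x, generator
        while b:
            if b & 1: r ^= a
            b >>= 1
            a <<= 1
            if a & 0x100: a ^= prim
        x = r
    exp[charac] = exp[0]  # wraparound convenience entry
    return exp, log

def gf_mul(x, y, exp, log, charac=255):
    '''Table-based product: exp[(log x + log y) mod charac], with 0 handled separately.'''
    if x == 0 or y == 0:
        return 0
    return exp[(log[x] + log[y]) % charac]

exp, log = build_tables()
assert gf_mul(0x57, 0x83, exp, log) == 0xC1  # classic GF(2^8) worked example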
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
GF2int._to_binpoly
def _to_binpoly(x):
    '''Convert a Galois Field's number into a nice polynomial'''
    if x <= 0: return "0"
    b = 1 # init to 2^0 = 1
    c = [] # stores the degrees of each term of the polynomials
    i = 0 # counter for b = 2^i
    while x > 0:
        b = (1 << i) # generate a number power of 2: 2^0, 2^1, 2^2, ..., 2^i. Equivalent to b = 2^i
        if x & b : # then check if x is divisible by the power of 2. Equivalent to x % 2^i == 0
            # If yes, then...
            c.append(i) # append this power (i, the exponent, gives us the coefficient)
            x ^= b # and compute the remainder of x / b
        i = i+1 # increment to compute the next power of 2
    return " + ".join(["x^%i" % y for y in c[::-1]])
python
def _to_binpoly(x):
    '''Convert a Galois Field's number into a nice polynomial'''
    if x <= 0: return "0"
    b = 1 # init to 2^0 = 1
    c = [] # stores the degrees of each term of the polynomials
    i = 0 # counter for b = 2^i
    while x > 0:
        b = (1 << i) # generate a number power of 2: 2^0, 2^1, 2^2, ..., 2^i. Equivalent to b = 2^i
        if x & b : # then check if x is divisible by the power of 2. Equivalent to x % 2^i == 0
            # If yes, then...
            c.append(i) # append this power (i, the exponent, gives us the coefficient)
            x ^= b # and compute the remainder of x / b
        i = i+1 # increment to compute the next power of 2
    return " + ".join(["x^%i" % y for y in c[::-1]])
Convert a Galois Field's number into a nice polynomial
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L250-L263
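Worked example: 0x11b is 0b100011011, so bits 8, 4, 3, 1 and 0 are set and the helper renders it as x^8 + x^4 + x^3 + x^1 + x^0. A dependency-free sketch that produces the same string:

def to_binpoly(x):
    '''Render a non-negative integer as a GF(2) polynomial string, highest degree first.'''
    return " + ".join("x^%i" % i for i in range(x.bit_length() - 1, -1, -1) if x & (1 << i)) or "0"

assert to_binpoly(0x11b) == "x^8 + x^4 + x^3 + x^1 + x^0"
assert to_binpoly(0) == "0"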
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
GF2int.multiply
def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
    '''A slow multiply method. This method gives the same results as the other __mul__ method but without needing precomputed tables, thus it can be used to generate those tables.
    If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
    This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
    The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
    1- carry-less addition and substraction (XOR in GF(2^p))
    2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
    '''
    r = 0
    a = int(a)
    b = int(b)
    while b: # while b is not 0
        if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
        b = b >> 1 # equivalent to b // 2
        a = a << 1 # equivalent to a*2
        if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).

    return GF2int(r)
python
def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
    '''A slow multiply method. This method gives the same results as the other __mul__ method but without needing precomputed tables, thus it can be used to generate those tables.
    If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
    This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
    The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
    1- carry-less addition and substraction (XOR in GF(2^p))
    2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
    '''
    r = 0
    a = int(a)
    b = int(b)
    while b: # while b is not 0
        if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
        b = b >> 1 # equivalent to b // 2
        a = a << 1 # equivalent to a*2
        if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).

    return GF2int(r)
A slow multiply method. This method gives the same results as the other __mul__ method but without needing precomputed tables, thus it can be used to generate those tables. If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations). This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together. The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are: 1- carry-less addition and substraction (XOR in GF(2^p)) 2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L265-L287
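A worked check of the Russian-peasant routine, using the classic GF(2^8) example {57} x {83} = {C1} under the 0x11b polynomial (a standalone copy returning a plain int, so it runs without the GF2int class):

def gf_mult_peasant(a, b, prim=0x11b, field_charac_full=256, carryless=True):
    '''Standalone copy of the Russian Peasant multiplication above, returning a plain int.'''
    r = 0
    while b:
        if b & 1:
            r = r ^ a if carryless else r + a  # carry-less addition is XOR in GF(2^p)
        b >>= 1
        a <<= 1
        if prim > 0 and a & field_charac_full:
            a ^= prim                          # modular reduction by the prime polynomial
    return r

assert gf_mult_peasant(0x57, 0x83) == 0xC1                   # FIPS-197 style worked example
assert gf_mult_peasant(3, 5, prim=0, carryless=False) == 15  # degenerates to ordinary integer multiplication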
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/ff.py
GF2int.multiply_slow
def multiply_slow(x, y, prim=0x11b):
    '''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
    This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''

    ### Define bitwise carry-less operations as inner functions ###
    def cl_mult(x,y):
        '''Bitwise carry-less multiplication on integers'''
        z = 0
        i = 0
        while (y>>i) > 0:
            if y & (1<<i):
                z ^= x<<i
            i += 1
        return z

    def bit_length(n):
        '''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
        bits = 0
        while n >> bits: bits += 1
        return bits

    def cl_div(dividend, divisor=None):
        '''Bitwise carry-less long division on integers and returns the remainder'''
        # Compute the position of the most significant bit for each integers
        dl1 = bit_length(dividend)
        dl2 = bit_length(divisor)
        # If the dividend is smaller than the divisor, just exit
        if dl1 < dl2:
            return dividend
        # Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
        for i in _range(dl1-dl2,-1,-1):
            # Check that the dividend is divisible (useless for the first iteration but important for the next ones)
            if dividend & (1 << i+dl2-1):
                # If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction)
                dividend ^= divisor << i
        return dividend

    ### Main GF multiplication routine ###
    # Multiply the gf numbers
    result = cl_mult(x,y)
    # Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
    if prim > 0: result = cl_div(result, prim)

    return result
python
def multiply_slow(x, y, prim=0x11b):
    '''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
    This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''

    ### Define bitwise carry-less operations as inner functions ###
    def cl_mult(x,y):
        '''Bitwise carry-less multiplication on integers'''
        z = 0
        i = 0
        while (y>>i) > 0:
            if y & (1<<i):
                z ^= x<<i
            i += 1
        return z

    def bit_length(n):
        '''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
        bits = 0
        while n >> bits: bits += 1
        return bits

    def cl_div(dividend, divisor=None):
        '''Bitwise carry-less long division on integers and returns the remainder'''
        # Compute the position of the most significant bit for each integers
        dl1 = bit_length(dividend)
        dl2 = bit_length(divisor)
        # If the dividend is smaller than the divisor, just exit
        if dl1 < dl2:
            return dividend
        # Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
        for i in _range(dl1-dl2,-1,-1):
            # Check that the dividend is divisible (useless for the first iteration but important for the next ones)
            if dividend & (1 << i+dl2-1):
                # If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction)
                dividend ^= divisor << i
        return dividend

    ### Main GF multiplication routine ###
    # Multiply the gf numbers
    result = cl_mult(x,y)
    # Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
    if prim > 0: result = cl_div(result, prim)

    return result
Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table. This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L289-L334
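The two routes (progressive reduction inside the Russian-peasant loop vs. one carry-less multiplication followed by carry-less division) should agree on every pair of field elements; a brute-force consistency sketch over the small field GF(2^4) keeps the check instant (0x13 = x^4 + x + 1 is a standard degree-4 irreducible polynomial):

def cl_mult(x, y):
    '''Carry-less (XOR) multiplication of two integers.'''
    z = 0
    while y:
        if y & 1:
            z ^= x
        y >>= 1
        x <<= 1
    return z

def cl_mod(a, m):
    '''Remainder of the carry-less long division of a by m.'''
    while a.bit_length() >= m.bit_length():
        a ^= m << (a.bit_length() - m.bit_length())
    return a

def peasant(a, b, prim, overflow_bit):
    '''Russian-peasant multiply with reduction inside the loop (as in GF2int.multiply).'''
    r = 0
    while b:
        if b & 1: r ^= a
        b >>= 1
        a <<= 1
        if a & overflow_bit: a ^= prim
    return r

PRIM, OVERFLOW = 0x13, 0x10   # GF(2^4) with x^4 + x + 1
for a in range(16):
    for b in range(16):
        assert cl_mod(cl_mult(a, b), PRIM) == peasant(a, b, PRIM, OVERFLOW)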
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/_meliaejson.py
loads
def loads( source ):
    """Load json structure from meliae from source

    Supports only the required structures to support loading meliae memory dumps
    """
    source = source.strip()
    assert source.startswith( '{' )
    assert source.endswith( '}' )
    source = source[1:-1]
    result = {}
    for match in attr.finditer( source ):
        key = match.group('key')
        if match.group( 'list' ) is not None:
            value = [
                int(x) for x in match.group( 'list' ).strip().replace(',',' ').split()
            ]
        elif match.group( 'int' ) is not None:
            value = int( match.group( 'int' ))
        elif match.group( 'string' ) is not None:
            def deescape( match ):
                return unichr( int( match.group(0)[2:], 16 ))
            value = match.group('string').decode( 'utf-8' )
            value = escape.sub( deescape, value, )
            value = simple_escape.sub( lambda x: x.group(1), value, )
        else:
            raise RuntimeError( "Matched something we don't know how to process:", match.groupdict() )
        result[key] = value
    return result
python
def loads( source ):
    """Load json structure from meliae from source

    Supports only the required structures to support loading meliae memory dumps
    """
    source = source.strip()
    assert source.startswith( '{' )
    assert source.endswith( '}' )
    source = source[1:-1]
    result = {}
    for match in attr.finditer( source ):
        key = match.group('key')
        if match.group( 'list' ) is not None:
            value = [
                int(x) for x in match.group( 'list' ).strip().replace(',',' ').split()
            ]
        elif match.group( 'int' ) is not None:
            value = int( match.group( 'int' ))
        elif match.group( 'string' ) is not None:
            def deescape( match ):
                return unichr( int( match.group(0)[2:], 16 ))
            value = match.group('string').decode( 'utf-8' )
            value = escape.sub( deescape, value, )
            value = simple_escape.sub( lambda x: x.group(1), value, )
        else:
            raise RuntimeError( "Matched something we don't know how to process:", match.groupdict() )
        result[key] = value
    return result
Load json structure from meliae from source Supports only the required structures to support loading meliae memory dumps
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/_meliaejson.py#L40-L74
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/rs.py
RSCoder.encode
def encode(self, message, poly=False, k=None, return_string=True):
    '''Encode a given string or list of values (between 0 and gf2_charac) with reed-solomon encoding. Returns a list of values (or a string if return_string is true) with the k message bytes and n-k parity bytes at the end.
    If a message is < k bytes long, it is assumed to be padded at the front with null bytes.
    The sequence returned is always n bytes long.
    If poly is not False, returns the encoded Polynomial object instead of the polynomial translated back to a string (useful for debugging)
    '''
    n = self.n
    if not k: k = self.k

    if len(message)>k:
        raise ValueError("Message length is max %d. Message was %d" % (k, len(message)))

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(message, _str):
        message = [ord(x) for x in message]

    # Encode message as a polynomial:
    m = Polynomial([GF2int(x) for x in message])

    # Shift polynomial up by n-k by multiplying by x^(n-k) to reserve the first n-k coefficients for the ecc. This effectively pad the message with \0 bytes for the lower coefficients (where the ecc will be placed).
    mprime = m * Polynomial([GF2int(1)] + [GF2int(0)]*(n-k))

    # mprime = q*g + b for some q
    # so let's find b, the code word (ecc block):
    b = mprime % self.g[k]

    # Subtract out b, so now c = q*g, which is a way of xoring mprime and code word b, which is a way of just saying that we append the polynomial ecc code to the original message (we replace the padded 0 bytes of mprime with the code word)
    c = mprime - b
    # Since c is a multiple of g, it has (at least) n-k roots: α^1 through
    # α^(n-k)

    if not poly:
        # Turn the polynomial c back into a string
        ret = self._list_rjust(c.coefficients, n, 0) # rjust is useful for the nostrip feature
        if return_string and self.gf2_charac < 256:
            ret = self._list2str(ret)
    else:
        ret = c
    return ret
python
def encode(self, message, poly=False, k=None, return_string=True):
    '''Encode a given string or list of values (between 0 and gf2_charac) with reed-solomon encoding. Returns a list of values (or a string if return_string is true) with the k message bytes and n-k parity bytes at the end.
    If a message is < k bytes long, it is assumed to be padded at the front with null bytes.
    The sequence returned is always n bytes long.
    If poly is not False, returns the encoded Polynomial object instead of the polynomial translated back to a string (useful for debugging)
    '''
    n = self.n
    if not k: k = self.k

    if len(message)>k:
        raise ValueError("Message length is max %d. Message was %d" % (k, len(message)))

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(message, _str):
        message = [ord(x) for x in message]

    # Encode message as a polynomial:
    m = Polynomial([GF2int(x) for x in message])

    # Shift polynomial up by n-k by multiplying by x^(n-k) to reserve the first n-k coefficients for the ecc. This effectively pad the message with \0 bytes for the lower coefficients (where the ecc will be placed).
    mprime = m * Polynomial([GF2int(1)] + [GF2int(0)]*(n-k))

    # mprime = q*g + b for some q
    # so let's find b, the code word (ecc block):
    b = mprime % self.g[k]

    # Subtract out b, so now c = q*g, which is a way of xoring mprime and code word b, which is a way of just saying that we append the polynomial ecc code to the original message (we replace the padded 0 bytes of mprime with the code word)
    c = mprime - b
    # Since c is a multiple of g, it has (at least) n-k roots: α^1 through
    # α^(n-k)

    if not poly:
        # Turn the polynomial c back into a string
        ret = self._list_rjust(c.coefficients, n, 0) # rjust is useful for the nostrip feature
        if return_string and self.gf2_charac < 256:
            ret = self._list2str(ret)
    else:
        ret = c
    return ret
Encode a given string or list of values (between 0 and gf2_charac) with reed-solomon encoding. Returns a list of values (or a string if return_string is true) with the k message bytes and n-k parity bytes at the end. If a message is < k bytes long, it is assumed to be padded at the front with null bytes. The sequence returned is always n bytes long. If poly is not False, returns the encoded Polynomial object instead of the polynomial translated back to a string (useful for debugging)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L115-L162
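A usage sketch, assuming the RSCoder constructor from the same rs.py takes (n, k) as in the classic RS(255,223) configuration and that the import path below is correct:

from pyFileFixity.lib.brownanrs.rs import RSCoder   # assumed import path

coder = RSCoder(255, 223)                  # n=255 symbols per block, k=223 of them carry the message
codeword = coder.encode("Hello, world!")   # short messages are implicitly left-padded with null bytes
assert len(codeword) == 255                # always n symbols: message + 32 parity bytes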
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/rs.py
RSCoder.check
def check(self, r, k=None):
    '''Verifies the codeword is valid by testing that the codeword (message+ecc) as a polynomial code divides g
    returns True/False
    '''
    n = self.n
    if not k: k = self.k
    #h = self.h[k]
    g = self.g[k]

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(r, _str):
        r = [ord(x) for x in r]

    # Turn r into a polynomial
    c = Polynomial([GF2int(x) for x in r])

    # This works too, but takes longer. Both checks are just as valid.
    #return (c*h)%gtimesh == Polynomial(x0=0)

    # Since all codewords are multiples of g, checking that codeword c divides g
    # suffices for validating a codeword.
    return c % g == Polynomial(x0=0)
python
def check(self, r, k=None):
    '''Verifies the codeword is valid by testing that the codeword (message+ecc) as a polynomial code divides g
    returns True/False
    '''
    n = self.n
    if not k: k = self.k
    #h = self.h[k]
    g = self.g[k]

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(r, _str):
        r = [ord(x) for x in r]

    # Turn r into a polynomial
    c = Polynomial([GF2int(x) for x in r])

    # This works too, but takes longer. Both checks are just as valid.
    #return (c*h)%gtimesh == Polynomial(x0=0)

    # Since all codewords are multiples of g, checking that codeword c divides g
    # suffices for validating a codeword.
    return c % g == Polynomial(x0=0)
Verifies the codeword is valid by testing that the codeword (message+ecc) as a polynomial code divides g returns True/False
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L202-L223
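Continuing the sketch above: corrupting even a single symbol means the codeword no longer divides g, so check (and the syndrome-based check_fast defined next) should flip to False:

corrupted = codeword[:-1] + chr(ord(codeword[-1]) ^ 0x01)  # flip one bit in the last parity symbol
assert coder.check(codeword)
assert not coder.check(corrupted)
assert not coder.check_fast(corrupted)                     # syndrome-based check agrees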
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/rs.py
RSCoder.check_fast
def check_fast(self, r, k=None):
    '''Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
    returns True/False
    '''
    n = self.n
    if not k: k = self.k
    #h = self.h[k]
    g = self.g[k]

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(r, _str):
        r = [ord(x) for x in r]

    # Turn r into a polynomial
    r = Polynomial([GF2int(x) for x in r])

    # Compute the syndromes:
    sz = self._syndromes(r, k=k)

    # Checking that the syndrome is all 0 is sufficient to check if there are no more any errors in the decoded message
    #return all(int(x) == 0 for x in sz)
    return sz.coefficients.count(GF2int(0)) == len(sz)
python
def check_fast(self, r, k=None):
    '''Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
    returns True/False
    '''
    n = self.n
    if not k: k = self.k
    #h = self.h[k]
    g = self.g[k]

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(r, _str):
        r = [ord(x) for x in r]

    # Turn r into a polynomial
    r = Polynomial([GF2int(x) for x in r])

    # Compute the syndromes:
    sz = self._syndromes(r, k=k)

    # Checking that the syndrome is all 0 is sufficient to check if there are no more any errors in the decoded message
    #return all(int(x) == 0 for x in sz)
    return sz.coefficients.count(GF2int(0)) == len(sz)
Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered. returns True/False
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L225-L246