repository_name (string, 5–67 chars) | func_path_in_repository (string, 4–234 chars) | func_name (string, 0–314 chars) | whole_func_string (string, 52–3.87M chars) | language (6 classes) | func_code_string (string, 52–3.87M chars) | func_documentation_string (string, 1–47.2k chars) | func_code_url (string, 85–339 chars) |
---|---|---|---|---|---|---|---|
lrq3000/pyFileFixity | pyFileFixity/lib/distance/distance/_simpledists.py | jaccard | def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2)) | python | def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2)) | Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_simpledists.py#L27-L34 |
lrq3000/pyFileFixity | pyFileFixity/lib/distance/distance/_simpledists.py | sorensen | def sorensen(seq1, seq2):
"""Compute the Sorensen distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - (2 * len(set1 & set2) / float(len(set1) + len(set2))) | python | def sorensen(seq1, seq2):
"""Compute the Sorensen distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - (2 * len(set1 & set2) / float(len(set1) + len(set2))) | Compute the Sorensen distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_simpledists.py#L37-L44 |
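A minimal usage sketch of the two set-based distances above (`jaccard` and `sorensen`); the import path mirrors the repository layout and is illustrative only:

```python
# Illustrative only: the module is normally used via the bundled `distance` package.
from pyFileFixity.lib.distance.distance._simpledists import jaccard, sorensen

a, b = "night", "nacht"
# Character sets: {n,i,g,h,t} and {n,a,c,h,t}; intersection {n,h,t}, union of 7 items.
print(jaccard(a, b))   # 1 - 3/7 ≈ 0.571
print(sorensen(a, b))  # 1 - 2*3/(5+5) = 0.4
```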
lrq3000/pyFileFixity | pyFileFixity/lib/distance/distance/_lcsubstrings.py | lcsubstrings | def lcsubstrings(seq1, seq2, positions=False):
"""Find the longest common substring(s) in the sequences `seq1` and `seq2`.
If positions evaluates to `True` only their positions will be returned,
together with their length, in a tuple:
(length, [(start pos in seq1, start pos in seq2)..])
Otherwise, the substrings themselves will be returned, in a set.
Example:
>>> lcsubstrings("sedentar", "dentist")
{'dent'}
>>> lcsubstrings("sedentar", "dentist", positions=True)
(4, ((2, 0),))
"""
L1, L2 = len(seq1), len(seq2)
ms = []
mlen = last = 0
if L1 < L2:
seq1, seq2 = seq2, seq1
L1, L2 = L2, L1
column = array('L', range(L2))
for i in range(L1):
for j in range(L2):
old = column[j]
if seq1[i] == seq2[j]:
if i == 0 or j == 0:
column[j] = 1
else:
column[j] = last + 1
if column[j] > mlen:
mlen = column[j]
ms = [(i, j)]
elif column[j] == mlen:
ms.append((i, j))
else:
column[j] = 0
last = old
if positions:
return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms if ms))
return set(seq1[i - mlen + 1:i + 1] for i, _ in ms if ms) | python | def lcsubstrings(seq1, seq2, positions=False):
"""Find the longest common substring(s) in the sequences `seq1` and `seq2`.
If positions evaluates to `True` only their positions will be returned,
together with their length, in a tuple:
(length, [(start pos in seq1, start pos in seq2)..])
Otherwise, the substrings themselves will be returned, in a set.
Example:
>>> lcsubstrings("sedentar", "dentist")
{'dent'}
>>> lcsubstrings("sedentar", "dentist", positions=True)
(4, ((2, 0),))
"""
L1, L2 = len(seq1), len(seq2)
ms = []
mlen = last = 0
if L1 < L2:
seq1, seq2 = seq2, seq1
L1, L2 = L2, L1
column = array('L', range(L2))
for i in range(L1):
for j in range(L2):
old = column[j]
if seq1[i] == seq2[j]:
if i == 0 or j == 0:
column[j] = 1
else:
column[j] = last + 1
if column[j] > mlen:
mlen = column[j]
ms = [(i, j)]
elif column[j] == mlen:
ms.append((i, j))
else:
column[j] = 0
last = old
if positions:
return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms if ms))
return set(seq1[i - mlen + 1:i + 1] for i, _ in ms if ms) | Find the longest common substring(s) in the sequences `seq1` and `seq2`.
If positions evaluates to `True` only their positions will be returned,
together with their length, in a tuple:
(length, [(start pos in seq1, start pos in seq2)..])
Otherwise, the substrings themselves will be returned, in a set.
Example:
>>> lcsubstrings("sedentar", "dentist")
{'dent'}
>>> lcsubstrings("sedentar", "dentist", positions=True)
(4, ((2, 0),)) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_lcsubstrings.py#L6-L51 |
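A short usage sketch of `lcsubstrings`, mirroring the example in its docstring (the import path is illustrative); note that the positions come back as a tuple of tuples:

```python
from pyFileFixity.lib.distance.distance._lcsubstrings import lcsubstrings  # illustrative path

# The substrings themselves, returned as a set
print(lcsubstrings("sedentar", "dentist"))                  # {'dent'}
# Length plus the start positions in (seq1, seq2)
print(lcsubstrings("sedentar", "dentist", positions=True))  # (4, ((2, 0),))
```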
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/coldshotadapter.py | BaseColdshotAdapter.background_color | def background_color(self, node, depth):
"""Create a (unique-ish) background color for each node"""
if self.color_mapping is None:
self.color_mapping = {}
color = self.color_mapping.get(node.key)
if color is None:
depth = len(self.color_mapping)
red = (depth * 10) % 255
green = 200 - ((depth * 5) % 200)
blue = (depth * 25) % 200
self.color_mapping[node.key] = color = wx.Colour(red, green, blue)
return color | python | def background_color(self, node, depth):
"""Create a (unique-ish) background color for each node"""
if self.color_mapping is None:
self.color_mapping = {}
color = self.color_mapping.get(node.key)
if color is None:
depth = len(self.color_mapping)
red = (depth * 10) % 255
green = 200 - ((depth * 5) % 200)
blue = (depth * 25) % 200
self.color_mapping[node.key] = color = wx.Colour(red, green, blue)
return color | Create a (unique-ish) background color for each node | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/coldshotadapter.py#L15-L26 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/coldshotadapter.py | BaseColdshotAdapter.SetPercentage | def SetPercentage(self, percent, total):
"""Set whether to display percentage values (and total for doing so)"""
self.percentageView = percent
self.total = total | python | def SetPercentage(self, percent, total):
"""Set whether to display percentage values (and total for doing so)"""
self.percentageView = percent
self.total = total | Set whether to display percentage values (and total for doing so) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/coldshotadapter.py#L28-L31 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/debug.py | runprofilerandshow | def runprofilerandshow(funcname, profilepath, argv='', *args, **kwargs):
'''
Run the profiler on a function and show the result in a GUI visualisation using RunSnakeRun.
Note: calibration can also be used for more exact results
'''
functionprofiler.runprofile(funcname+'(\''+argv+'\')', profilepath, *args, **kwargs)
print 'Showing profile (windows should open in the background)'; sys.stdout.flush();
functionprofiler.browseprofilegui(profilepath) | python | def runprofilerandshow(funcname, profilepath, argv='', *args, **kwargs):
'''
Run the profiler on a function and show the result in a GUI visualisation using RunSnakeRun.
Note: calibration can also be used for more exact results
'''
functionprofiler.runprofile(funcname+'(\''+argv+'\')', profilepath, *args, **kwargs)
print 'Showing profile (windows should open in the background)'; sys.stdout.flush();
functionprofiler.browseprofilegui(profilepath) | Run the profiler on a function and show the result in a GUI visualisation using RunSnakeRun.
Note: calibration can also be used for more exact results | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/debug.py#L35-L42 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/debug.py | callgraph | def callgraph(func):
''' Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph!
'''
import pycallgraph
@functools.wraps(func)
def wrapper(*args, **kwargs):
pycallgraph.start_trace()
func(*args, **kwargs)
pycallgraph.save_dot('callgraph.log')
pycallgraph.make_dot_graph('callgraph.png')
#pycallgraph.make_dot_graph('callgraph.jpg', format='jpg', tool='neato')
return wrapper | python | def callgraph(func):
''' Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph!
'''
import pycallgraph
@functools.wraps(func)
def wrapper(*args, **kwargs):
pycallgraph.start_trace()
func(*args, **kwargs)
pycallgraph.save_dot('callgraph.log')
pycallgraph.make_dot_graph('callgraph.png')
#pycallgraph.make_dot_graph('callgraph.jpg', format='jpg', tool='neato')
return wrapper | Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph! | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/debug.py#L106-L118 |
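A hedged sketch of applying the `callgraph` decorator above to a toy function; it assumes the legacy `pycallgraph` package and GraphViz are installed, as the docstring notes, and the import path is illustrative:

```python
from pyFileFixity.lib.profilers.visual.debug import callgraph  # illustrative path

@callgraph
def crunch(n):
    # Hypothetical workload whose call tree we want to visualise
    return sum(i * i for i in range(n))

# Calling the wrapped function triggers the trace and writes
# callgraph.log (dot source) and callgraph.png in the working directory.
crunch(10000)
```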
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | _long2bytes | def _long2bytes(n, blocksize=0):
"""Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front
of the byte string with binary zeros so that the length is a multiple
of blocksize.
"""
# After much testing, this algorithm was deemed to be the fastest.
s = ''
pack = struct.pack
while n > 0:
### CHANGED FROM '>I' TO '<I'. (DCG)
s = pack('<I', n & 0xffffffffL) + s
### --------------------------
n = n >> 32
# Strip off leading zeros.
for i in range(len(s)):
if s[i] <> '\000':
break
else:
# Only happens when n == 0.
s = '\000'
i = 0
s = s[i:]
# Add back some pad bytes. This could be done more efficiently
# w.r.t. the de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * '\000' + s
return s | python | def _long2bytes(n, blocksize=0):
"""Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front
of the byte string with binary zeros so that the length is a multiple
of blocksize.
"""
# After much testing, this algorithm was deemed to be the fastest.
s = ''
pack = struct.pack
while n > 0:
### CHANGED FROM '>I' TO '<I'. (DCG)
s = pack('<I', n & 0xffffffffL) + s
### --------------------------
n = n >> 32
# Strip off leading zeros.
for i in range(len(s)):
if s[i] <> '\000':
break
else:
# Only happens when n == 0.
s = '\000'
i = 0
s = s[i:]
# Add back some pad bytes. This could be done more efficiently
# w.r.t. the de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * '\000' + s
return s | Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front
of the byte string with binary zeros so that the length is a multiple
of blocksize. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L45-L78 |
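A small Python 2 illustration of the byte ordering produced by `_long2bytes` (the module is Python 2 code): because of the `'<I'` change noted in the comments, each 32-bit word is packed little-endian while the words themselves are emitted most-significant first.

```python
from pyFileFixity.lib.md5py import _long2bytes  # illustrative path, Python 2 only

n = 0x0102030405060708L
print repr(_long2bytes(n))
# '\x04\x03\x02\x01\x08\x07\x06\x05'  (big-endian word order, little-endian bytes per word)
print repr(_long2bytes(n, 16))
# same value front-padded with 8 zero bytes so the length is a multiple of blocksize=16
```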
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | XX | def XX(func, a, b, c, d, x, s, ac):
"""Wrapper for call distribution to functions F, G, H and I.
This replaces functions FF, GG, HH and II from "Appl. Crypto."
Rotation is separate from addition to prevent recomputation
(now summed-up in one function).
"""
res = 0L
res = res + a + func(b, c, d)
res = res + x
res = res + ac
res = res & 0xffffffffL
res = _rotateLeft(res, s)
res = res & 0xffffffffL
res = res + b
return res & 0xffffffffL | python | def XX(func, a, b, c, d, x, s, ac):
"""Wrapper for call distribution to functions F, G, H and I.
This replaces functions FF, GG, HH and II from "Appl. Crypto."
Rotation is separate from addition to prevent recomputation
(now summed-up in one function).
"""
res = 0L
res = res + a + func(b, c, d)
res = res + x
res = res + ac
res = res & 0xffffffffL
res = _rotateLeft(res, s)
res = res & 0xffffffffL
res = res + b
return res & 0xffffffffL | Wrapper for call distribution to functions F, G, H and I.
This replaces functions FF, GG, HH and II from "Appl. Crypto."
Rotation is separate from addition to prevent recomputation
(now summed-up in one function). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L129-L146 |
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | MD5.init | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
# Load magic initialization constants.
self.A = 0x67452301L
self.B = 0xefcdab89L
self.C = 0x98badcfeL
self.D = 0x10325476L | python | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
# Load magic initialization constants.
self.A = 0x67452301L
self.B = 0xefcdab89L
self.C = 0x98badcfeL
self.D = 0x10325476L | Initialize the message-digest and set all fields to zero. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L179-L189 |
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | MD5._transform | def _transform(self, inp):
"""Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES!
"""
a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D
# Round 1.
S11, S12, S13, S14 = 7, 12, 17, 22
a = XX(F, a, b, c, d, inp[ 0], S11, 0xD76AA478L) # 1
d = XX(F, d, a, b, c, inp[ 1], S12, 0xE8C7B756L) # 2
c = XX(F, c, d, a, b, inp[ 2], S13, 0x242070DBL) # 3
b = XX(F, b, c, d, a, inp[ 3], S14, 0xC1BDCEEEL) # 4
a = XX(F, a, b, c, d, inp[ 4], S11, 0xF57C0FAFL) # 5
d = XX(F, d, a, b, c, inp[ 5], S12, 0x4787C62AL) # 6
c = XX(F, c, d, a, b, inp[ 6], S13, 0xA8304613L) # 7
b = XX(F, b, c, d, a, inp[ 7], S14, 0xFD469501L) # 8
a = XX(F, a, b, c, d, inp[ 8], S11, 0x698098D8L) # 9
d = XX(F, d, a, b, c, inp[ 9], S12, 0x8B44F7AFL) # 10
c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) # 11
b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) # 12
a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) # 13
d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) # 14
c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) # 15
b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) # 16
# Round 2.
S21, S22, S23, S24 = 5, 9, 14, 20
a = XX(G, a, b, c, d, inp[ 1], S21, 0xF61E2562L) # 17
d = XX(G, d, a, b, c, inp[ 6], S22, 0xC040B340L) # 18
c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) # 19
b = XX(G, b, c, d, a, inp[ 0], S24, 0xE9B6C7AAL) # 20
a = XX(G, a, b, c, d, inp[ 5], S21, 0xD62F105DL) # 21
d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) # 22
c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) # 23
b = XX(G, b, c, d, a, inp[ 4], S24, 0xE7D3FBC8L) # 24
a = XX(G, a, b, c, d, inp[ 9], S21, 0x21E1CDE6L) # 25
d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) # 26
c = XX(G, c, d, a, b, inp[ 3], S23, 0xF4D50D87L) # 27
b = XX(G, b, c, d, a, inp[ 8], S24, 0x455A14EDL) # 28
a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) # 29
d = XX(G, d, a, b, c, inp[ 2], S22, 0xFCEFA3F8L) # 30
c = XX(G, c, d, a, b, inp[ 7], S23, 0x676F02D9L) # 31
b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) # 32
# Round 3.
S31, S32, S33, S34 = 4, 11, 16, 23
a = XX(H, a, b, c, d, inp[ 5], S31, 0xFFFA3942L) # 33
d = XX(H, d, a, b, c, inp[ 8], S32, 0x8771F681L) # 34
c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) # 35
b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) # 36
a = XX(H, a, b, c, d, inp[ 1], S31, 0xA4BEEA44L) # 37
d = XX(H, d, a, b, c, inp[ 4], S32, 0x4BDECFA9L) # 38
c = XX(H, c, d, a, b, inp[ 7], S33, 0xF6BB4B60L) # 39
b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) # 40
a = XX(H, a, b, c, d, inp[13], S31, 0x289B7EC6L) # 41
d = XX(H, d, a, b, c, inp[ 0], S32, 0xEAA127FAL) # 42
c = XX(H, c, d, a, b, inp[ 3], S33, 0xD4EF3085L) # 43
b = XX(H, b, c, d, a, inp[ 6], S34, 0x04881D05L) # 44
a = XX(H, a, b, c, d, inp[ 9], S31, 0xD9D4D039L) # 45
d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) # 46
c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) # 47
b = XX(H, b, c, d, a, inp[ 2], S34, 0xC4AC5665L) # 48
# Round 4.
S41, S42, S43, S44 = 6, 10, 15, 21
a = XX(I, a, b, c, d, inp[ 0], S41, 0xF4292244L) # 49
d = XX(I, d, a, b, c, inp[ 7], S42, 0x432AFF97L) # 50
c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) # 51
b = XX(I, b, c, d, a, inp[ 5], S44, 0xFC93A039L) # 52
a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) # 53
d = XX(I, d, a, b, c, inp[ 3], S42, 0x8F0CCC92L) # 54
c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) # 55
b = XX(I, b, c, d, a, inp[ 1], S44, 0x85845DD1L) # 56
a = XX(I, a, b, c, d, inp[ 8], S41, 0x6FA87E4FL) # 57
d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) # 58
c = XX(I, c, d, a, b, inp[ 6], S43, 0xA3014314L) # 59
b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) # 60
a = XX(I, a, b, c, d, inp[ 4], S41, 0xF7537E82L) # 61
d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) # 62
c = XX(I, c, d, a, b, inp[ 2], S43, 0x2AD7D2BBL) # 63
b = XX(I, b, c, d, a, inp[ 9], S44, 0xEB86D391L) # 64
A = (A + a) & 0xffffffffL
B = (B + b) & 0xffffffffL
C = (C + c) & 0xffffffffL
D = (D + d) & 0xffffffffL
self.A, self.B, self.C, self.D = A, B, C, D | python | def _transform(self, inp):
"""Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES!
"""
a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D
# Round 1.
S11, S12, S13, S14 = 7, 12, 17, 22
a = XX(F, a, b, c, d, inp[ 0], S11, 0xD76AA478L) # 1
d = XX(F, d, a, b, c, inp[ 1], S12, 0xE8C7B756L) # 2
c = XX(F, c, d, a, b, inp[ 2], S13, 0x242070DBL) # 3
b = XX(F, b, c, d, a, inp[ 3], S14, 0xC1BDCEEEL) # 4
a = XX(F, a, b, c, d, inp[ 4], S11, 0xF57C0FAFL) # 5
d = XX(F, d, a, b, c, inp[ 5], S12, 0x4787C62AL) # 6
c = XX(F, c, d, a, b, inp[ 6], S13, 0xA8304613L) # 7
b = XX(F, b, c, d, a, inp[ 7], S14, 0xFD469501L) # 8
a = XX(F, a, b, c, d, inp[ 8], S11, 0x698098D8L) # 9
d = XX(F, d, a, b, c, inp[ 9], S12, 0x8B44F7AFL) # 10
c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) # 11
b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) # 12
a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) # 13
d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) # 14
c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) # 15
b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) # 16
# Round 2.
S21, S22, S23, S24 = 5, 9, 14, 20
a = XX(G, a, b, c, d, inp[ 1], S21, 0xF61E2562L) # 17
d = XX(G, d, a, b, c, inp[ 6], S22, 0xC040B340L) # 18
c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) # 19
b = XX(G, b, c, d, a, inp[ 0], S24, 0xE9B6C7AAL) # 20
a = XX(G, a, b, c, d, inp[ 5], S21, 0xD62F105DL) # 21
d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) # 22
c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) # 23
b = XX(G, b, c, d, a, inp[ 4], S24, 0xE7D3FBC8L) # 24
a = XX(G, a, b, c, d, inp[ 9], S21, 0x21E1CDE6L) # 25
d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) # 26
c = XX(G, c, d, a, b, inp[ 3], S23, 0xF4D50D87L) # 27
b = XX(G, b, c, d, a, inp[ 8], S24, 0x455A14EDL) # 28
a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) # 29
d = XX(G, d, a, b, c, inp[ 2], S22, 0xFCEFA3F8L) # 30
c = XX(G, c, d, a, b, inp[ 7], S23, 0x676F02D9L) # 31
b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) # 32
# Round 3.
S31, S32, S33, S34 = 4, 11, 16, 23
a = XX(H, a, b, c, d, inp[ 5], S31, 0xFFFA3942L) # 33
d = XX(H, d, a, b, c, inp[ 8], S32, 0x8771F681L) # 34
c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) # 35
b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) # 36
a = XX(H, a, b, c, d, inp[ 1], S31, 0xA4BEEA44L) # 37
d = XX(H, d, a, b, c, inp[ 4], S32, 0x4BDECFA9L) # 38
c = XX(H, c, d, a, b, inp[ 7], S33, 0xF6BB4B60L) # 39
b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) # 40
a = XX(H, a, b, c, d, inp[13], S31, 0x289B7EC6L) # 41
d = XX(H, d, a, b, c, inp[ 0], S32, 0xEAA127FAL) # 42
c = XX(H, c, d, a, b, inp[ 3], S33, 0xD4EF3085L) # 43
b = XX(H, b, c, d, a, inp[ 6], S34, 0x04881D05L) # 44
a = XX(H, a, b, c, d, inp[ 9], S31, 0xD9D4D039L) # 45
d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) # 46
c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) # 47
b = XX(H, b, c, d, a, inp[ 2], S34, 0xC4AC5665L) # 48
# Round 4.
S41, S42, S43, S44 = 6, 10, 15, 21
a = XX(I, a, b, c, d, inp[ 0], S41, 0xF4292244L) # 49
d = XX(I, d, a, b, c, inp[ 7], S42, 0x432AFF97L) # 50
c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) # 51
b = XX(I, b, c, d, a, inp[ 5], S44, 0xFC93A039L) # 52
a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) # 53
d = XX(I, d, a, b, c, inp[ 3], S42, 0x8F0CCC92L) # 54
c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) # 55
b = XX(I, b, c, d, a, inp[ 1], S44, 0x85845DD1L) # 56
a = XX(I, a, b, c, d, inp[ 8], S41, 0x6FA87E4FL) # 57
d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) # 58
c = XX(I, c, d, a, b, inp[ 6], S43, 0xA3014314L) # 59
b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) # 60
a = XX(I, a, b, c, d, inp[ 4], S41, 0xF7537E82L) # 61
d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) # 62
c = XX(I, c, d, a, b, inp[ 2], S43, 0x2AD7D2BBL) # 63
b = XX(I, b, c, d, a, inp[ 9], S44, 0xEB86D391L) # 64
A = (A + a) & 0xffffffffL
B = (B + b) & 0xffffffffL
C = (C + c) & 0xffffffffL
D = (D + d) & 0xffffffffL
self.A, self.B, self.C, self.D = A, B, C, D | Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES! | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L192-L291 |
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | MD5.update | def update(self, inBuf):
"""Add to the current message.
Update the md5 object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments, i.e. m.update(a); m.update(b) is equivalent
to m.update(a+b).
"""
leninBuf = long(len(inBuf))
# Compute number of bytes mod 64.
index = (self.count[0] >> 3) & 0x3FL
# Update number of bits.
self.count[0] = self.count[0] + (leninBuf << 3)
if self.count[0] < (leninBuf << 3):
self.count[1] = self.count[1] + 1
self.count[1] = self.count[1] + (leninBuf >> 29)
partLen = 64 - index
if leninBuf >= partLen:
self.input[index:] = map(None, inBuf[:partLen])
self._transform(_bytelist2long(self.input))
i = partLen
while i + 63 < leninBuf:
self._transform(_bytelist2long(map(None, inBuf[i:i+64])))
i = i + 64
else:
self.input = map(None, inBuf[i:leninBuf])
else:
i = 0
self.input = self.input + map(None, inBuf) | python | def update(self, inBuf):
"""Add to the current message.
Update the md5 object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments, i.e. m.update(a); m.update(b) is equivalent
to m.update(a+b).
"""
leninBuf = long(len(inBuf))
# Compute number of bytes mod 64.
index = (self.count[0] >> 3) & 0x3FL
# Update number of bits.
self.count[0] = self.count[0] + (leninBuf << 3)
if self.count[0] < (leninBuf << 3):
self.count[1] = self.count[1] + 1
self.count[1] = self.count[1] + (leninBuf >> 29)
partLen = 64 - index
if leninBuf >= partLen:
self.input[index:] = map(None, inBuf[:partLen])
self._transform(_bytelist2long(self.input))
i = partLen
while i + 63 < leninBuf:
self._transform(_bytelist2long(map(None, inBuf[i:i+64])))
i = i + 64
else:
self.input = map(None, inBuf[i:leninBuf])
else:
i = 0
self.input = self.input + map(None, inBuf) | Add to the current message.
Update the md5 object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments, i.e. m.update(a); m.update(b) is equivalent
to m.update(a+b). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L297-L329 |
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | MD5.digest | def digest(self):
"""Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
"""
A = self.A
B = self.B
C = self.C
D = self.D
input = [] + self.input
count = [] + self.count
index = (self.count[0] >> 3) & 0x3fL
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
padding = ['\200'] + ['\000'] * 63
self.update(padding[:padLen])
# Append length (before padding).
bits = _bytelist2long(self.input[:56]) + count
self._transform(bits)
# Store state in digest.
digest = _long2bytes(self.A << 96, 16)[:4] + \
_long2bytes(self.B << 64, 16)[4:8] + \
_long2bytes(self.C << 32, 16)[8:12] + \
_long2bytes(self.D, 16)[12:]
self.A = A
self.B = B
self.C = C
self.D = D
self.input = input
self.count = count
return digest | python | def digest(self):
"""Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
"""
A = self.A
B = self.B
C = self.C
D = self.D
input = [] + self.input
count = [] + self.count
index = (self.count[0] >> 3) & 0x3fL
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
padding = ['\200'] + ['\000'] * 63
self.update(padding[:padLen])
# Append length (before padding).
bits = _bytelist2long(self.input[:56]) + count
self._transform(bits)
# Store state in digest.
digest = _long2bytes(self.A << 96, 16)[:4] + \
_long2bytes(self.B << 64, 16)[4:8] + \
_long2bytes(self.C << 32, 16)[8:12] + \
_long2bytes(self.D, 16)[12:]
self.A = A
self.B = B
self.C = C
self.D = D
self.input = input
self.count = count
return digest | Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L332-L375 |
lrq3000/pyFileFixity | pyFileFixity/lib/md5py.py | MD5.hexdigest | def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x:"%02x" % x, d)
d = string.join(d, '')
return d | python | def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x:"%02x" % x, d)
d = string.join(d, '')
return d | Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/md5py.py#L378-L392 |
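A minimal usage sketch of this pure-Python MD5 class, assuming it follows the usual construct/update/hexdigest flow of the historical `md5py` module it derives from (the constructor itself is not shown in this excerpt; Python 2 only):

```python
from pyFileFixity.lib.md5py import MD5  # illustrative path, Python 2 only

m = MD5()              # assumes a no-argument constructor, as in the historical module
m.update('hello ')
m.update('world')      # per the docstring, equivalent to a single update('hello world')
print m.hexdigest()    # 32 hexadecimal characters
print repr(m.digest()) # the same digest as a raw 16-byte string
```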
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py | MeliaeAdapter.value | def value( self, node, parent=None ):
"""Return value used to compare size of this node"""
# this is the *weighted* size/contribution of the node
try:
return node['contribution']
except KeyError, err:
contribution = int(node.get('totsize',0)/float( len(node.get('parents',())) or 1))
node['contribution'] = contribution
return contribution | python | def value( self, node, parent=None ):
"""Return value used to compare size of this node"""
# this is the *weighted* size/contribution of the node
try:
return node['contribution']
except KeyError, err:
contribution = int(node.get('totsize',0)/float( len(node.get('parents',())) or 1))
node['contribution'] = contribution
return contribution | Return value used to compare size of this node | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py#L51-L59 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py | MeliaeAdapter.label | def label( self, node ):
"""Return textual description of this node"""
result = []
if node.get('type'):
result.append( node['type'] )
if node.get('name' ):
result.append( node['name'] )
elif node.get('value') is not None:
result.append( unicode(node['value'])[:32])
if 'module' in node and not node['module'] in result:
result.append( ' in %s'%( node['module'] ))
if node.get( 'size' ):
result.append( '%s'%( mb( node['size'] )))
if node.get( 'totsize' ):
result.append( '(%s)'%( mb( node['totsize'] )))
parent_count = len( node.get('parents',()))
if parent_count > 1:
result.append( '/%s refs'%( parent_count ))
return " ".join(result) | python | def label( self, node ):
"""Return textual description of this node"""
result = []
if node.get('type'):
result.append( node['type'] )
if node.get('name' ):
result.append( node['name'] )
elif node.get('value') is not None:
result.append( unicode(node['value'])[:32])
if 'module' in node and not node['module'] in result:
result.append( ' in %s'%( node['module'] ))
if node.get( 'size' ):
result.append( '%s'%( mb( node['size'] )))
if node.get( 'totsize' ):
result.append( '(%s)'%( mb( node['totsize'] )))
parent_count = len( node.get('parents',()))
if parent_count > 1:
result.append( '/%s refs'%( parent_count ))
return " ".join(result) | Return textual description of this node | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py#L60-L78 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py | MeliaeAdapter.parents | def parents( self, node ):
"""Retrieve/calculate the set of parents for the given node"""
if 'index' in node:
index = node['index']()
parents = list(meliaeloader.children( node, index, 'parents' ))
return parents
return [] | python | def parents( self, node ):
"""Retrieve/calculate the set of parents for the given node"""
if 'index' in node:
index = node['index']()
parents = list(meliaeloader.children( node, index, 'parents' ))
return parents
return [] | Retrieve/calculate the set of parents for the given node | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py#L86-L92 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py | MeliaeAdapter.best_parent | def best_parent( self, node, tree_type=None ):
"""Choose the best parent for a given node"""
parents = self.parents(node)
selected_parent = None
if node['type'] == 'type':
module = ".".join( node['name'].split( '.' )[:-1] )
if module:
for mod in parents:
if mod['type'] == 'module' and mod['name'] == module:
selected_parent = mod
if parents and selected_parent is None:
parents.sort( key = lambda x: self.value(node, x) )
return parents[-1]
return selected_parent | python | def best_parent( self, node, tree_type=None ):
"""Choose the best parent for a given node"""
parents = self.parents(node)
selected_parent = None
if node['type'] == 'type':
module = ".".join( node['name'].split( '.' )[:-1] )
if module:
for mod in parents:
if mod['type'] == 'module' and mod['name'] == module:
selected_parent = mod
if parents and selected_parent is None:
parents.sort( key = lambda x: self.value(node, x) )
return parents[-1]
return selected_parent | Choose the best parent for a given node | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py#L93-L106 |
lrq3000/pyFileFixity | pyFileFixity/lib/gooey/python_bindings/gooey_decorator.py | Gooey | def Gooey(f=None,
advanced=True,
language='english',
show_config=True,
program_name=None,
program_description=None,
default_size=(610, 530),
required_cols=2,
optional_cols=2,
dump_build_config=False,
monospace_display=False):
'''
Decorator for client code's main function.
Serializes argparse data to JSON for use with the Gooey front end
'''
params = locals()
def build(payload):
def run_gooey(self, args=None, namespace=None):
source_path = sys.argv[0]
build_spec = config_generator.create_from_parser(self, source_path, payload_name=payload.__name__, **params)
if dump_build_config:
config_path = os.path.join(os.getcwd(), 'gooey_config.json')
print( 'Writing Build Config to: {}'.format(config_path))
with open(config_path, 'w') as f:
f.write(json.dumps(build_spec, indent=2))
application.run(build_spec)
def inner2(*args, **kwargs):
ArgumentParser.original_parse_args = ArgumentParser.parse_args
ArgumentParser.parse_args = run_gooey
return payload(*args, **kwargs)
inner2.__name__ = payload.__name__
return inner2
def run_without_gooey(func):
return lambda: func()
if IGNORE_COMMAND in sys.argv:
sys.argv.remove(IGNORE_COMMAND)
if callable(f):
return run_without_gooey(f)
return run_without_gooey
if callable(f):
return build(f)
return build | python | def Gooey(f=None,
advanced=True,
language='english',
show_config=True,
program_name=None,
program_description=None,
default_size=(610, 530),
required_cols=2,
optional_cols=2,
dump_build_config=False,
monospace_display=False):
'''
Decorator for client code's main function.
Serializes argparse data to JSON for use with the Gooey front end
'''
params = locals()
def build(payload):
def run_gooey(self, args=None, namespace=None):
source_path = sys.argv[0]
build_spec = config_generator.create_from_parser(self, source_path, payload_name=payload.__name__, **params)
if dump_build_config:
config_path = os.path.join(os.getcwd(), 'gooey_config.json')
print( 'Writing Build Config to: {}'.format(config_path))
with open(config_path, 'w') as f:
f.write(json.dumps(build_spec, indent=2))
application.run(build_spec)
def inner2(*args, **kwargs):
ArgumentParser.original_parse_args = ArgumentParser.parse_args
ArgumentParser.parse_args = run_gooey
return payload(*args, **kwargs)
inner2.__name__ = payload.__name__
return inner2
def run_without_gooey(func):
return lambda: func()
if IGNORE_COMMAND in sys.argv:
sys.argv.remove(IGNORE_COMMAND)
if callable(f):
return run_without_gooey(f)
return run_without_gooey
if callable(f):
return build(f)
return build | Decorator for client code's main function.
Serializes argparse data to JSON for use with the Gooey front end | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/python_bindings/gooey_decorator.py#L25-L74 |
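A short sketch of how the `Gooey` decorator above is typically applied to an argparse entry point; the option names match the decorator signature shown, and the GUI is produced by intercepting `parse_args`:

```python
from argparse import ArgumentParser
from gooey import Gooey  # within this repository the bundled copy lives under pyFileFixity.lib.gooey

@Gooey(program_name='pyFileFixity demo', default_size=(610, 530))
def main():
    parser = ArgumentParser(description='Toy Gooey front end')
    parser.add_argument('input_file', help='file to process')
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()  # replaced by run_gooey, which renders the GUI instead
    print(args)

if __name__ == '__main__':
    main()
```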
lrq3000/pyFileFixity | pyFileFixity/lib/gooey/gui/component_builder.py | build_components | def build_components(widget_list):
'''
:param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList
Converts the Json widget information into concrete wx Widget types
'''
required_args, optional_args = partition(widget_list, is_required)
checkbox_args, general_args = partition(map(build_widget, optional_args), is_checkbox)
required_args = map(build_widget, required_args)
optional_args = general_args + checkbox_args
return ComponentList(required_args, optional_args) | python | def build_components(widget_list):
'''
:param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList
Converts the Json widget information into concrete wx Widget types
'''
required_args, optional_args = partition(widget_list, is_required)
checkbox_args, general_args = partition(map(build_widget, optional_args), is_checkbox)
required_args = map(build_widget, required_args)
optional_args = general_args + checkbox_args
return ComponentList(required_args, optional_args) | :param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList
Converts the Json widget information into concrete wx Widget types | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/component_builder.py#L10-L23 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | _merge_asized | def _merge_asized(base, other, level=0):
"""
Merge **Asized** instances `base` and `other` into `base`.
"""
ref2key = lambda ref: ref.name.split(':')[0]
base.size += other.size
base.flat += other.flat
if level > 0:
base.name = ref2key(base)
# Add refs from other to base. Any new refs are appended.
base.refs = list(base.refs) # we may need to append items
refs = {}
for ref in base.refs:
refs[ref2key(ref)] = ref
for ref in other.refs:
key = ref2key(ref)
if key in refs:
_merge_asized(refs[key], ref, level=level+1)
else:
# Don't modify existing Asized instances => deepcopy
base.refs.append(deepcopy(ref))
base.refs[-1].name = key | python | def _merge_asized(base, other, level=0):
"""
Merge **Asized** instances `base` and `other` into `base`.
"""
ref2key = lambda ref: ref.name.split(':')[0]
base.size += other.size
base.flat += other.flat
if level > 0:
base.name = ref2key(base)
# Add refs from other to base. Any new refs are appended.
base.refs = list(base.refs) # we may need to append items
refs = {}
for ref in base.refs:
refs[ref2key(ref)] = ref
for ref in other.refs:
key = ref2key(ref)
if key in refs:
_merge_asized(refs[key], ref, level=level+1)
else:
# Don't modify existing Asized instances => deepcopy
base.refs.append(deepcopy(ref))
base.refs[-1].name = key | Merge **Asized** instances `base` and `other` into `base`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L17-L38 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | _merge_objects | def _merge_objects(tref, merged, obj):
"""
Merge the snapshot size information of multiple tracked objects. The
tracked object `obj` is scanned for size information at time `tref`.
The sizes are merged into **Asized** instance `merged`.
"""
size = None
for (timestamp, tsize) in obj.snapshots:
if timestamp == tref:
size = tsize
if size:
_merge_asized(merged, size) | python | def _merge_objects(tref, merged, obj):
"""
Merge the snapshot size information of multiple tracked objects. The
tracked object `obj` is scanned for size information at time `tref`.
The sizes are merged into **Asized** instance `merged`.
"""
size = None
for (timestamp, tsize) in obj.snapshots:
if timestamp == tref:
size = tsize
if size:
_merge_asized(merged, size) | Merge the snapshot size information of multiple tracked objects. The
tracked object `obj` is scanned for size information at time `tref`.
The sizes are merged into **Asized** instance `merged`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L41-L52 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | _format_trace | def _format_trace(trace):
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for fname, lineno, func, src, _ in trace:
if src:
for line in src:
lines.append(' '+line.strip()+'\n')
lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines) | python | def _format_trace(trace):
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for fname, lineno, func, src, _ in trace:
if src:
for line in src:
lines.append(' '+line.strip()+'\n')
lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines) | Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L55-L68 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | Stats.load_stats | def load_stats(self, fdump):
"""
Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access.
"""
if isinstance(fdump, type('')):
fdump = open(fdump, 'rb')
self.index = pickle.load(fdump)
self.snapshots = pickle.load(fdump)
self.sorted = [] | python | def load_stats(self, fdump):
"""
Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access.
"""
if isinstance(fdump, type('')):
fdump = open(fdump, 'rb')
self.index = pickle.load(fdump)
self.snapshots = pickle.load(fdump)
self.sorted = [] | Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L102-L112 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | Stats.dump_stats | def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
The argument `file` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close() | python | def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
The argument `file` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close() | Dump the logged data to a file.
The argument `file` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L115-L130 |
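A hedged sketch of the dump/load workflow these two methods support, using the pympler `ClassTracker` API that this bundled module derives from (class and file names are illustrative):

```python
from pympler.classtracker import ClassTracker

class Widget(object):
    pass

tracker = ClassTracker()
tracker.track_class(Widget)
widgets = [Widget() for _ in range(1000)]
tracker.create_snapshot('after allocation')

stats = tracker.stats                   # ConsoleStats bound to this tracker
stats.dump_stats('tracking.dump')       # pickles the index and snapshots, as described above
# Later, a fresh Stats/ConsoleStats object can re-read the file with
# load_stats('tracking.dump') and run sort_stats()/print_stats() offline.
stats.sort_stats('size').print_stats()
```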
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | Stats._init_sort | def _init_sort(self):
"""
Prepare the data to be sorted.
If not yet sorted, import all tracked objects from the tracked index.
Extend the tracking information by implicit information to make
sorting easier (DSU pattern).
"""
if not self.sorted:
# Identify the snapshot that tracked the largest amount of memory.
tmax = None
maxsize = 0
for snapshot in self.snapshots:
if snapshot.tracked_total > maxsize:
tmax = snapshot.timestamp
maxsize = snapshot.tracked_total
for key in list(self.index.keys()):
for tobj in self.index[key]:
tobj.classname = key
tobj.size = tobj.get_max_size()
tobj.tsize = tobj.get_size_at_time(tmax)
self.sorted.extend(self.index[key]) | python | def _init_sort(self):
"""
Prepare the data to be sorted.
If not yet sorted, import all tracked objects from the tracked index.
Extend the tracking information by implicit information to make
sorting easier (DSU pattern).
"""
if not self.sorted:
# Identify the snapshot that tracked the largest amount of memory.
tmax = None
maxsize = 0
for snapshot in self.snapshots:
if snapshot.tracked_total > maxsize:
tmax = snapshot.timestamp
maxsize = snapshot.tracked_total
for key in list(self.index.keys()):
for tobj in self.index[key]:
tobj.classname = key
tobj.size = tobj.get_max_size()
tobj.tsize = tobj.get_size_at_time(tmax)
self.sorted.extend(self.index[key]) | Prepare the data to be sorted.
If not yet sorted, import all tracked objects from the tracked index.
Extend the tracking information by implicit information to make
sorting easier (DSU pattern). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L133-L152 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | Stats.sort_stats | def sort_stats(self, *args):
"""
Sort the tracked objects according to the supplied criteria. The
argument is a string identifying the basis of a sort (example: 'size'
or 'classname'). When more than one key is provided, then additional
keys are used as secondary criteria when there is equality in all keys
selected before them. For example, ``sort_stats('name', 'size')`` will
sort all the entries according to their class name, and resolve all
ties (identical class names) by sorting by size. The criteria are
fields in the tracked object instances. Results are stored in the
``self.sorted`` list which is used by ``Stats.print_stats()`` and other
methods. The fields available for sorting are:
'classname'
the name with which the class was registered
'name'
the classname
'birth'
creation timestamp
'death'
destruction timestamp
'size'
the maximum measured size of the object
'tsize'
the measured size during the largest snapshot
'repr'
string representation of the object
Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches
are in ascending order (alphabetical).
The function returns self to allow calling functions on the result::
stats.sort_stats('size').reverse_order().print_stats()
"""
criteria = ('classname', 'tsize', 'birth', 'death',
'name', 'repr', 'size')
if not set(criteria).issuperset(set(args)):
raise ValueError("Invalid sort criteria")
if not args:
args = criteria
def args_to_tuple(obj):
keys = []
for attr in args:
attribute = getattr(obj, attr)
if attr in ('tsize', 'size'):
attribute = -attribute
keys.append(attribute)
return tuple(keys)
self._init_sort()
self.sorted.sort(key=args_to_tuple)
return self | python | def sort_stats(self, *args):
"""
Sort the tracked objects according to the supplied criteria. The
argument is a string identifying the basis of a sort (example: 'size'
or 'classname'). When more than one key is provided, then additional
keys are used as secondary criteria when there is equality in all keys
selected before them. For example, ``sort_stats('name', 'size')`` will
sort all the entries according to their class name, and resolve all
ties (identical class names) by sorting by size. The criteria are
fields in the tracked object instances. Results are stored in the
``self.sorted`` list which is used by ``Stats.print_stats()`` and other
methods. The fields available for sorting are:
'classname'
the name with which the class was registered
'name'
the classname
'birth'
creation timestamp
'death'
destruction timestamp
'size'
the maximum measured size of the object
'tsize'
the measured size during the largest snapshot
'repr'
string representation of the object
Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches
are in ascending order (alphabetical).
The function returns self to allow calling functions on the result::
stats.sort_stats('size').reverse_order().print_stats()
"""
criteria = ('classname', 'tsize', 'birth', 'death',
'name', 'repr', 'size')
if not set(criteria).issuperset(set(args)):
raise ValueError("Invalid sort criteria")
if not args:
args = criteria
def args_to_tuple(obj):
keys = []
for attr in args:
attribute = getattr(obj, attr)
if attr in ('tsize', 'size'):
attribute = -attribute
keys.append(attribute)
return tuple(keys)
self._init_sort()
self.sorted.sort(key=args_to_tuple)
return self | Sort the tracked objects according to the supplied criteria. The
argument is a string identifying the basis of a sort (example: 'size'
or 'classname'). When more than one key is provided, then additional
keys are used as secondary criteria when there is equality in all keys
selected before them. For example, ``sort_stats('name', 'size')`` will
sort all the entries according to their class name, and resolve all
ties (identical class names) by sorting by size. The criteria are
fields in the tracked object instances. Results are stored in the
``self.sorted`` list which is used by ``Stats.print_stats()`` and other
methods. The fields available for sorting are:
'classname'
the name with which the class was registered
'name'
the classname
'birth'
creation timestamp
'death'
destruction timestamp
'size'
the maximum measured size of the object
'tsize'
the measured size during the largest snapshot
'repr'
string representation of the object
Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches
are in ascending order (alphabetical).
The function returns self to allow calling functions on the result::
stats.sort_stats('size').reverse_order().print_stats() | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L155-L213 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | Stats.annotate_snapshot | def annotate_snapshot(self, snapshot):
"""
Store additional statistical data in snapshot.
"""
if hasattr(snapshot, 'classes'):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError: # pragma: no cover
pct = 0
try:
avg = total / active
except ZeroDivisionError:
avg = 0
snapshot.classes[classname] = dict(sum=total,
avg=avg,
pct=pct,
active=active)
snapshot.classes[classname]['merged'] = merged | python | def annotate_snapshot(self, snapshot):
"""
Store additional statistical data in snapshot.
"""
if hasattr(snapshot, 'classes'):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError: # pragma: no cover
pct = 0
try:
avg = total / active
except ZeroDivisionError:
avg = 0
snapshot.classes[classname] = dict(sum=total,
avg=avg,
pct=pct,
active=active)
snapshot.classes[classname]['merged'] = merged | Store additional statistical data in snapshot. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L233-L266 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | ConsoleStats._print_refs | def _print_refs(self, refs, total, prefix=' ',
level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
trunc(prefix+str(ref.name), 50),
pp(ref.size),
int(ref.size*100.0/total),
level
))
self._print_refs(ref.refs, total, prefix=prefix+' ',
level=level+1) | python | def _print_refs(self, refs, total, prefix=' ',
level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
trunc(prefix+str(ref.name), 50),
pp(ref.size),
int(ref.size*100.0/total),
level
))
self._print_refs(ref.refs, total, prefix=prefix+' ',
level=level+1) | Print individual referents recursively. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L280-L297 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | ConsoleStats.print_object | def print_object(self, tobj):
"""
Print the gathered information of object `tobj` in human-readable format.
"""
if tobj.death:
self.stream.write('%-32s ( free ) %-35s\n' % (
trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
self.stream.write('%-32s 0x%08x %-35s\n' % (
trunc(tobj.name, 32, left=1),
tobj.id,
trunc(tobj.repr, 35)
))
if tobj.trace:
self.stream.write(_format_trace(tobj.trace))
for (timestamp, size) in tobj.snapshots:
self.stream.write(' %-30s %s\n' % (
pp_timestamp(timestamp), pp(size.size)
))
self._print_refs(size.refs, size.size)
if tobj.death is not None:
self.stream.write(' %-30s finalize\n' % (
pp_timestamp(tobj.death),
)) | python | def print_object(self, tobj):
"""
Print the gathered information of object `tobj` in human-readable format.
"""
if tobj.death:
self.stream.write('%-32s ( free ) %-35s\n' % (
trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
self.stream.write('%-32s 0x%08x %-35s\n' % (
trunc(tobj.name, 32, left=1),
tobj.id,
trunc(tobj.repr, 35)
))
if tobj.trace:
self.stream.write(_format_trace(tobj.trace))
for (timestamp, size) in tobj.snapshots:
self.stream.write(' %-30s %s\n' % (
pp_timestamp(timestamp), pp(size.size)
))
self._print_refs(size.refs, size.size)
if tobj.death is not None:
self.stream.write(' %-30s finalize\n' % (
pp_timestamp(tobj.death),
)) | Print the gathered information of object `tobj` in human-readable format. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L300-L323 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | ConsoleStats.print_stats | def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj) | python | def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj) | Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contain the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, that many tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L326-L357 |
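A note on the `limit` handling above: a float below 1.0 is read as a fraction of all tracked objects, anything else as an absolute count. The following standalone sketch mirrors just that pruning rule; the `prune` helper and the `tracked` list are invented for illustration and are not part of the pympler API.

def prune(objects, limit=1.0):
    # Mirrors the limit logic of ConsoleStats.print_stats:
    # a float < 1.0 selects a fraction, any other value an absolute count.
    if limit < 1.0:
        limit = max(1, int(len(objects) * limit))
    return objects[:int(limit)]

tracked = ['obj%d' % i for i in range(10)]
print(prune(tracked, 0.3))  # fraction: 30% of 10 objects -> first 3 entries
print(prune(tracked, 5))    # absolute count -> first 5 entries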
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | ConsoleStats.print_summary | def print_summary(self):
"""
Print per-class summary for each snapshot.
"""
# Emit class summaries for each snapshot
classlist = self.tracked_classes
fobj = self.stream
fobj.write('---- SUMMARY '+'-'*66+'\n')
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
fobj.write('%-35s %11s %12s %12s %5s\n' % (
trunc(snapshot.desc, 35),
'active',
pp(snapshot.asizeof_total),
'average',
'pct'
))
for classname in classlist:
info = snapshot.classes.get(classname)
fobj.write(' %-33s %11d %12s %12s %4d%%\n' % (
trunc(classname, 33),
info['active'],
pp(info['sum']),
pp(info['avg']),
info['pct']
))
fobj.write('-'*79+'\n') | python | def print_summary(self):
"""
Print per-class summary for each snapshot.
"""
# Emit class summaries for each snapshot
classlist = self.tracked_classes
fobj = self.stream
fobj.write('---- SUMMARY '+'-'*66+'\n')
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
fobj.write('%-35s %11s %12s %12s %5s\n' % (
trunc(snapshot.desc, 35),
'active',
pp(snapshot.asizeof_total),
'average',
'pct'
))
for classname in classlist:
info = snapshot.classes.get(classname)
fobj.write(' %-33s %11d %12s %12s %4d%%\n' % (
trunc(classname, 33),
info['active'],
pp(info['sum']),
pp(info['avg']),
info['pct']
))
fobj.write('-'*79+'\n') | Print per-class summary for each snapshot. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L360-L388 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats._print_refs | def _print_refs(self, fobj, refs, total, level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
if level == 1:
fobj.write('<table>\n')
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
data = dict(level=level,
name=trunc(str(ref.name), 128),
size=pp(ref.size),
pct=ref.size*100.0/total)
fobj.write(self.refrow % data)
self._print_refs(fobj, ref.refs, total, level=level+1)
if level == 1:
fobj.write("</table>\n") | python | def _print_refs(self, fobj, refs, total, level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
if level == 1:
fobj.write('<table>\n')
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
data = dict(level=level,
name=trunc(str(ref.name), 128),
size=pp(ref.size),
pct=ref.size*100.0/total)
fobj.write(self.refrow % data)
self._print_refs(fobj, ref.refs, total, level=level+1)
if level == 1:
fobj.write("</table>\n") | Print individual referents recursively. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L434-L452 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.print_class_details | def print_class_details(self, fname, classname):
"""
Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`.
"""
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))
fobj.write("<h1>%s</h1>\n" % (classname))
sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)
fobj.write(self.charts[classname])
fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
if classname in snapshot.classes:
merged = snapshot.classes[classname]['merged']
fobj.write(self.class_snapshot % {
'name': snapshot.desc, 'cls':classname, 'total': pp(merged.size)
})
if merged.refs:
self._print_refs(fobj, merged.refs, merged.size)
else:
fobj.write('<p>No per-referent sizes recorded.</p>\n')
fobj.write("<h2>Instances</h2>\n")
for tobj in self.index[classname]:
fobj.write('<table id="tl" width="100%" rules="rows">\n')
fobj.write('<tr><td id="hl" width="140px">Instance</td><td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
if tobj.repr:
fobj.write("<tr><td>Representation</td><td>%s </td></tr>\n" % tobj.repr)
fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" % (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
if tobj.trace:
trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
for (timestamp, size) in tobj.snapshots:
fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
if not size.refs:
fobj.write("<td>%s</td></tr>\n" % pp(size.size))
else:
fobj.write("<td>%s" % pp(size.size))
self._print_refs(fobj, size.refs, size.size)
fobj.write("</td></tr>\n")
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close() | python | def print_class_details(self, fname, classname):
"""
Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`.
"""
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))
fobj.write("<h1>%s</h1>\n" % (classname))
sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)
fobj.write(self.charts[classname])
fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
if classname in snapshot.classes:
merged = snapshot.classes[classname]['merged']
fobj.write(self.class_snapshot % {
'name': snapshot.desc, 'cls':classname, 'total': pp(merged.size)
})
if merged.refs:
self._print_refs(fobj, merged.refs, merged.size)
else:
fobj.write('<p>No per-referent sizes recorded.</p>\n')
fobj.write("<h2>Instances</h2>\n")
for tobj in self.index[classname]:
fobj.write('<table id="tl" width="100%" rules="rows">\n')
fobj.write('<tr><td id="hl" width="140px">Instance</td><td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
if tobj.repr:
fobj.write("<tr><td>Representation</td><td>%s </td></tr>\n" % tobj.repr)
fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" % (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
if tobj.trace:
trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
for (timestamp, size) in tobj.snapshots:
fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
if not size.refs:
fobj.write("<td>%s</td></tr>\n" % pp(size.size))
else:
fobj.write("<td>%s" % pp(size.size))
self._print_refs(fobj, size.refs, size.size)
fobj.write("</td></tr>\n")
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close() | Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L460-L515 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.relative_path | def relative_path(self, filepath, basepath=None):
"""
Convert `filepath` to a path relative to `basepath`. By
default basepath is self.basedir.
"""
if basepath is None:
basepath = self.basedir
if not basepath:
return filepath
if filepath.startswith(basepath):
rel = filepath[len(basepath):]
if rel and rel[0] == os.sep:
rel = rel[1:]
return rel | python | def relative_path(self, filepath, basepath=None):
"""
Convert `filepath` to a path relative to `basepath`. By
default basepath is self.basedir.
"""
if basepath is None:
basepath = self.basedir
if not basepath:
return filepath
if filepath.startswith(basepath):
rel = filepath[len(basepath):]
if rel and rel[0] == os.sep:
rel = rel[1:]
return rel | Convert `filepath` to a path relative to `basepath`. By
default basepath is self.basedir. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L537-L550 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_title_page | def create_title_page(self, filename, title=''):
"""
Output the title page.
"""
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
fobj.write(self.snapshot_cls_header)
for classname in classlist:
data = snapshot.classes[classname].copy()
data['cls'] = '<a href="%s">%s</a>' % (self.relative_path(self.links[classname]), classname)
data['sum'] = pp(data['sum'])
data['avg'] = pp(data['avg'])
fobj.write(self.snapshot_cls % data)
fobj.write('</table>')
fobj.write('</td><td>\n')
if snapshot.tracked_total:
fobj.write(self.charts[snapshot])
fobj.write('</td></tr>\n')
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close() | python | def create_title_page(self, filename, title=''):
"""
Output the title page.
"""
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
fobj.write(self.snapshot_cls_header)
for classname in classlist:
data = snapshot.classes[classname].copy()
data['cls'] = '<a href="%s">%s</a>' % (self.relative_path(self.links[classname]), classname)
data['sum'] = pp(data['sum'])
data['avg'] = pp(data['avg'])
fobj.write(self.snapshot_cls % data)
fobj.write('</table>')
fobj.write('</td><td>\n')
if snapshot.tracked_total:
fobj.write(self.charts[snapshot])
fobj.write('</td></tr>\n')
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close() | Output the title page. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L552-L601 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_lifetime_chart | def create_lifetime_chart(self, classname, filename=''):
"""
Create a chart that depicts the lifetimes of the instances registered with
`classname`. The output is written to `filename`.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
return HtmlStats.nopylab_msg % (classname+" lifetime")
cnt = []
for tobj in self.index[classname]:
cnt.append([tobj.birth, 1])
if tobj.death:
cnt.append([tobj.death, -1])
cnt.sort()
for i in range(1, len(cnt)):
cnt[i][1] += cnt[i-1][1]
#if cnt[i][0] == cnt[i-1][0]:
# del cnt[i-1]
x = [t for [t,c] in cnt]
y = [c for [t,c] in cnt]
figure()
xlabel("Execution time [s]")
ylabel("Instance #")
title("%s instances" % classname)
plot(x, y, 'o')
savefig(filename)
return self.chart_tag % (os.path.basename(filename)) | python | def create_lifetime_chart(self, classname, filename=''):
"""
Create a chart that depicts the lifetimes of the instances registered with
`classname`. The output is written to `filename`.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
return HtmlStats.nopylab_msg % (classname+" lifetime")
cnt = []
for tobj in self.index[classname]:
cnt.append([tobj.birth, 1])
if tobj.death:
cnt.append([tobj.death, -1])
cnt.sort()
for i in range(1, len(cnt)):
cnt[i][1] += cnt[i-1][1]
#if cnt[i][0] == cnt[i-1][0]:
# del cnt[i-1]
x = [t for [t,c] in cnt]
y = [c for [t,c] in cnt]
figure()
xlabel("Execution time [s]")
ylabel("Instance #")
title("%s instances" % classname)
plot(x, y, 'o')
savefig(filename)
return self.chart_tag % (os.path.basename(filename)) | Create a chart that depicts the lifetimes of the instances registered with
`classname`. The output is written to `filename`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L603-L634 |
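The plot above rests on a simple event-counting trick: every birth contributes +1, every death -1, and a running sum over the time-sorted events gives the number of live instances at each timestamp. A self-contained sketch of that computation, with invented timestamps and no matplotlib dependency (the `instance_counts` helper is hypothetical):

def instance_counts(lifetimes):
    # lifetimes: list of (birth, death) tuples; death may be None for still-alive objects.
    events = []
    for birth, death in lifetimes:
        events.append((birth, 1))
        if death is not None:
            events.append((death, -1))
    events.sort()
    counts, alive = [], 0
    for timestamp, delta in events:
        alive += delta
        counts.append((timestamp, alive))
    return counts

print(instance_counts([(0.0, 2.5), (1.0, None), (2.0, 3.0)]))
# [(0.0, 1), (1.0, 2), (2.0, 3), (2.5, 2), (3.0, 1)]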
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_snapshot_chart | def create_snapshot_chart(self, filename=''):
"""
Create chart that depicts the memory allocation over time apportioned to
the tracked classes.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
import matplotlib.mlab as mlab
except ImportError:
return self.nopylab_msg % ("memory allocation")
classlist = self.tracked_classes
times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
if max(pct) > 3.0:
sz = [float(fp.classes[cn]['sum'])/(1024*1024) for fp in self.snapshots]
sz = [sx+sy for sx, sy in zip(base, sz)]
xp, yp = mlab.poly_between(times, base, sz)
polys.append( ((xp, yp), {'label': cn}) )
poly_labels.append(cn)
base = sz
figure()
title("Snapshot Memory")
xlabel("Execution Time [s]")
ylabel("Virtual Memory [MiB]")
sizes = [float(fp.asizeof_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'r--', label='Total')
sizes = [float(fp.tracked_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'b--', label='Tracked total')
for (args, kwds) in polys:
fill(*args, **kwds)
legend(loc=2)
savefig(filename)
return self.chart_tag % (self.relative_path(filename)) | python | def create_snapshot_chart(self, filename=''):
"""
Create chart that depicts the memory allocation over time apportioned to
the tracked classes.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
import matplotlib.mlab as mlab
except ImportError:
return self.nopylab_msg % ("memory allocation")
classlist = self.tracked_classes
times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
if max(pct) > 3.0:
sz = [float(fp.classes[cn]['sum'])/(1024*1024) for fp in self.snapshots]
sz = [sx+sy for sx, sy in zip(base, sz)]
xp, yp = mlab.poly_between(times, base, sz)
polys.append( ((xp, yp), {'label': cn}) )
poly_labels.append(cn)
base = sz
figure()
title("Snapshot Memory")
xlabel("Execution Time [s]")
ylabel("Virtual Memory [MiB]")
sizes = [float(fp.asizeof_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'r--', label='Total')
sizes = [float(fp.tracked_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'b--', label='Tracked total')
for (args, kwds) in polys:
fill(*args, **kwds)
legend(loc=2)
savefig(filename)
return self.chart_tag % (self.relative_path(filename)) | Create chart that depicts the memory allocation over time apportioned to
the tracked classes. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L636-L678 |
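Two remarks on the function above. First, `mlab.poly_between` comes from an old matplotlib; it has since been deprecated and removed, so running this code today would require either an old matplotlib or replacing that call (for example with `fill_between`). Second, the stacking itself is independent of plotting: each class above the 3% threshold is drawn between a running `base` curve and `base + sizes`. A small sketch of that bookkeeping with invented per-class sizes in MiB (the `stack_layers` helper is hypothetical):

def stack_layers(per_class_sizes):
    # per_class_sizes: list of (classname, [size per snapshot in MiB]) pairs.
    # Returns (classname, lower_curve, upper_curve) triples for a stacked area plot.
    base = [0.0] * len(per_class_sizes[0][1])
    layers = []
    for cls, sizes in per_class_sizes:
        top = [b + s for b, s in zip(base, sizes)]
        layers.append((cls, base, top))
        base = top
    return layers

for cls, lower, upper in stack_layers([('Node', [1.0, 2.0]), ('Leaf', [0.5, 0.5])]):
    print("%s %s %s" % (cls, lower, upper))
# Node [0.0, 0.0] [1.0, 2.0]
# Leaf [1.0, 2.0] [1.5, 2.5]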
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_pie_chart | def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
if v['pct'] > 3.0:
classlist.append(k)
sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
figure(figsize=(8,8))
axes([0.1, 0.1, 0.8, 0.8])
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)
return self.chart_tag % (self.relative_path(filename)) | python | def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
if v['pct'] > 3.0:
classlist.append(k)
sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
figure(figsize=(8,8))
axes([0.1, 0.1, 0.8, 0.8])
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)
return self.chart_tag % (self.relative_path(filename)) | Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L680-L711 |
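The only non-plotting logic above is how the slices are chosen: classes above 3% of the total keep their own slice, and whatever remains of the grand total becomes a synthetic 'Other' slice inserted first. A short sketch of that bookkeeping with invented numbers (the `pie_slices` helper is hypothetical):

def pie_slices(class_stats, asizeof_total, threshold_pct=3.0):
    # class_stats: {classname: {'sum': bytes, 'pct': percentage of the total}}
    labels, sizes = [], []
    for name, info in sorted(class_stats.items()):
        if info['pct'] > threshold_pct:
            labels.append(name)
            sizes.append(info['sum'])
    labels.insert(0, 'Other')
    sizes.insert(0, asizeof_total - sum(sizes))
    return labels, sizes

print(pie_slices({'Node': {'sum': 700, 'pct': 70.0},
                  'Tiny': {'sum': 10, 'pct': 1.0}}, 1000))
# (['Other', 'Node'], [300, 700])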
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_html | def create_html(self, fname, title="ClassTracker Statistics"):
"""
Create HTML page `fname` and additional files in a directory derived
from `fname`.
"""
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
# Annotate all snapshots in advance
self.annotate()
# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows returning alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)
for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
self.charts[fp] = self.create_pie_chart(fp, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'-lt.png')
self.charts[cn] = self.create_lifetime_chart(cn, fn)
# Create HTML pages first for each class and then the index page.
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'.html')
self.links[cn] = fn
self.print_class_details(fn, cn)
self.create_title_page(fname, title=title) | python | def create_html(self, fname, title="ClassTracker Statistics"):
"""
Create HTML page `fname` and additional files in a directory derived
from `fname`.
"""
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
# Annotate all snapshots in advance
self.annotate()
# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows returning alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)
for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
self.charts[fp] = self.create_pie_chart(fp, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'-lt.png')
self.charts[cn] = self.create_lifetime_chart(cn, fn)
# Create HTML pages first for each class and then the index page.
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'.html')
self.links[cn] = fn
self.print_class_details(fn, cn)
self.create_title_page(fname, title=title) | Create HTML page `fname` and additional files in a directory derived
from `fname`. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L713-L750 |
lrq3000/pyFileFixity | pyFileFixity/lib/pathlib2.py | _Selector.select_from | def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir) | python | def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir) | Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L595-L602 |
lrq3000/pyFileFixity | pyFileFixity/lib/pathlib2.py | PurePath.relative_to | def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:]) | python | def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:]) | Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L958-L988 |
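A short usage sketch of the behaviour described above, using the standard-library `pathlib` that this vendored `pathlib2` backports. The drive/root corner cases mentioned in the comment (e.g. `Path('c:/').relative_to('c:')` giving `Path('/')`) are specific to this implementation and may differ in newer CPython releases, so only the portable cases are shown.

from pathlib import PurePosixPath

p = PurePosixPath('/etc/passwd')
print(p.relative_to('/etc'))   # passwd
try:
    p.relative_to('/usr')      # not a subpath -> ValueError
except ValueError as exc:
    print(exc)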
lrq3000/pyFileFixity | pyFileFixity/lib/pathlib2.py | Path.glob | def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p | python | def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p | Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L1190-L1200 |
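A quick usage sketch; as the code above shows, absolute (non-relative) patterns are rejected. The standard-library `pathlib` is used here for convenience; the vendored backport above exposes the same `Path.glob` behaviour.

from pathlib import Path

for p in Path('.').glob('*.py'):
    print(p)                        # every .py file in the current directory
try:
    list(Path('.').glob('/etc/*'))  # absolute pattern
except NotImplementedError as exc:
    print(exc)                      # "Non-relative patterns are unsupported"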
lrq3000/pyFileFixity | pyFileFixity/lib/pathlib2.py | Path.write_bytes | def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
if not isinstance(data, six.binary_type):
raise TypeError(
'data must be %s, not %s' %
(six.binary_type.__class__.__name__, data.__class__.__name__))
with self.open(mode='wb') as f:
return f.write(data) | python | def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
if not isinstance(data, six.binary_type):
raise TypeError(
'data must be %s, not %s' %
(six.binary_type.__class__.__name__, data.__class__.__name__))
with self.open(mode='wb') as f:
return f.write(data) | Open the file in bytes mode, write to it, and close the file. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L1303-L1312 |
lrq3000/pyFileFixity | pyFileFixity/lib/pathlib2.py | Path.write_text | def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, six.text_type):
raise TypeError(
'data must be %s, not %s' %
(six.text_type.__class__.__name__, data.__class__.__name__))
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data) | python | def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, six.text_type):
raise TypeError(
'data must be %s, not %s' %
(six.text_type.__class__.__name__, data.__class__.__name__))
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data) | Open the file in text mode, write to it, and close the file. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L1314-L1323 |
lrq3000/pyFileFixity | pyFileFixity/replication_repair.py | relpath_posix | def relpath_posix(recwalk_result, pardir, fromwinpath=False):
''' Helper function to convert all paths to relative posix like paths (to ease comparison) '''
return recwalk_result[0], path2unix(os.path.join(os.path.relpath(recwalk_result[0], pardir),recwalk_result[1]), nojoin=True, fromwinpath=fromwinpath) | python | def relpath_posix(recwalk_result, pardir, fromwinpath=False):
''' Helper function to convert all paths to relative posix like paths (to ease comparison) '''
return recwalk_result[0], path2unix(os.path.join(os.path.relpath(recwalk_result[0], pardir),recwalk_result[1]), nojoin=True, fromwinpath=fromwinpath) | Helper function to convert all paths to relative posix like paths (to ease comparison) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L62-L64 |
lrq3000/pyFileFixity | pyFileFixity/replication_repair.py | sort_dict_of_paths | def sort_dict_of_paths(d):
""" Sort a dict containing paths parts (ie, paths divided in parts and stored as a list). Top paths will be given precedence over deeper paths. """
# Find the path that is the deepest, and count the number of parts
max_rec = max(len(x) if x else 0 for x in d.values())
# Pad other paths with empty parts to fill in, so that all paths will have the same number of parts (necessary to compare correctly, else deeper paths may get precedence over top ones, since the folder name will be compared to filenames!)
for key in d.keys():
if d[key]:
d[key] = ['']*(max_rec-len(d[key])) + d[key]
# Sort the dict relatively to the paths alphabetical order
d_sort = sorted(d.items(), key=lambda x: x[1])
return d_sort | python | def sort_dict_of_paths(d):
""" Sort a dict containing paths parts (ie, paths divided in parts and stored as a list). Top paths will be given precedence over deeper paths. """
# Find the path that is the deepest, and count the number of parts
max_rec = max(len(x) if x else 0 for x in d.values())
# Pad other paths with empty parts to fill in, so that all paths will have the same number of parts (necessary to compare correctly, else deeper paths may get precedence over top ones, since the folder name will be compared to filenames!)
for key in d.keys():
if d[key]:
d[key] = ['']*(max_rec-len(d[key])) + d[key]
# Sort the dict relatively to the paths alphabetical order
d_sort = sorted(d.items(), key=lambda x: x[1])
return d_sort | Sort a dict containing path parts (ie, paths divided into parts and stored as lists). Top-level paths will be given precedence over deeper paths. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L69-L79
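The padding trick is easiest to see on a concrete input. A small example calling the function above, with invented path parts (the integer keys are folder indices, as used later by synchronize_files):

d = {0: ['zz.txt'],           # file at the root of folder 0
     1: ['aa', 'inner.txt']}  # file inside a subfolder of folder 1
print(sort_dict_of_paths(d))
# [(0, ['', 'zz.txt']), (1, ['aa', 'inner.txt'])]
# The root file wins despite 'zz' > 'aa', because the shorter path is
# left-padded with '' before the comparison.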
lrq3000/pyFileFixity | pyFileFixity/replication_repair.py | sort_group | def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
# First, sort the paths in order (each entry must be a couple: (parent_dir, filename), so that there's no ambiguity, because otherwise a file at the root would be considered as coming after a folder/file, since the ordering is done alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
# No element, then we just return
if base_elt[1] is None:
return None
# Else, we will now group equivalent files together (remember we are working on multiple directories, so we can have multiple equivalent relative filepaths, but of course the absolute filepaths are different).
else:
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = []
lst.append([base_elt])
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (ie, the generator for this folder is not exhausted yet)
if elt[1] is not None:
# If the filepath is the same as the latest grouped filepath, we add it to the same group
if elt[1] == base_elt[1]:
lst[-1].append(elt)
# Else the filepath is different: we create a new group, add the filepath to this group, and replace the latest grouped filepath
else:
if return_only_first: break # break here if we only need the first group
lst.append([elt])
base_elt = elt # replace the latest grouped filepath
return lst | python | def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
# First, sort the paths in order (each entry must be a couple: (parent_dir, filename), so that there's no ambiguity, because otherwise a file at the root would be considered as coming after a folder/file, since the ordering is done alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
# No element, then we just return
if base_elt[1] is None:
return None
# Else, we will now group equivalent files together (remember we are working on multiple directories, so we can have multiple equivalent relative filepaths, but of course the absolute filepaths are different).
else:
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = []
lst.append([base_elt])
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (generator died)
if elt[1] is not None:
# If the filepath is the same to the latest grouped filepath, we add it to the same group
if elt[1] == base_elt[1]:
lst[-1].append(elt)
# Else the filepath is different: we create a new group, add the filepath to this group, and replace the latest grouped filepath
else:
if return_only_first: break # break here if we only need the first group
lst.append([elt])
base_elt = elt # replace the latest grouped filepath
return lst | Sort a dictionary of relative paths and cluster equal paths together at the same time | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L81-L110 |
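A small example of the grouping, calling the function above with invented path parts: three input folders, two of which hold the same relative file.

d = {0: ['a.txt'], 1: ['b.txt'], 2: ['a.txt']}
print(sort_group(dict(d)))
# [[(0, ['a.txt']), (2, ['a.txt'])], [(1, ['b.txt'])]]
print(sort_group(dict(d), return_only_first=True))
# [[(0, ['a.txt']), (2, ['a.txt'])]]  (only the alphabetically lowest group)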
lrq3000/pyFileFixity | pyFileFixity/replication_repair.py | majority_vote_byte_scan | def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
'''Takes a list of files in string format representing the same data, and disambiguates them by majority vote: for each position in the string, if the character is not the same across all entries, we keep the most frequent one. If there is no majority, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the file path relative to the parent directory (ie, the relative path, so that we can compare the files from several directories).'''
# The idea of replication combined with ECC was a bit inspired by this paper: Friedman, Roy, Yoav Kantor, and Amir Kantor. "Combining Erasure-Code and Replication Redundancy Schemes for Increased Storage and Repair Efficiency in P2P Storage Systems.", 2013, Technion, Computer Science Department, Technical Report CS-2013-03
# But it is a very well known concept in redundancy engineering, usually called triple-modular redundancy (which is here extended to n-modular since we can supply any number of files we want, not just three).
# Preference in case of ambiguity is always given to the file of the first folder.
fileshandles = []
for filepath in fileslist:
if filepath:
# Already a file handle? Just store it in the fileshandles list
if hasattr(filepath, 'read'):
fileshandles.append(filepath)
# Else it's a string filepath, open the file
else:
fileshandles.append(open(filepath, 'rb'))
# Create and open output (merged) file, except if we were already given a file handle
if hasattr(outpath, 'write'):
outfile = outpath
else:
outpathfull = os.path.join(outpath, relfilepath)
pardir = os.path.dirname(outpathfull)
if not os.path.exists(pardir):
os.makedirs(pardir)
outfile = open(outpathfull, 'wb')
# Cannot vote if there's not at least 3 files!
# In this case, just copy the file from the first folder, verbatim
if len(fileshandles) < 3:
# If there's at least one input file, then copy it verbatim to the output folder
if fileshandles:
create_dir_if_not_exist(os.path.dirname(outpathfull))
buf = 1
while (buf):
buf = fileshandles[0].read()
outfile.write(buf)
outfile.flush()
return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
errors = []
entries = [1]*len(fileshandles) # init with a non-empty placeholder so that the while loop starts
while (entries.count('') < len(fileshandles)):
final_entry = []
# Read a block from all input files into memory
for i in xrange(len(fileshandles)):
entries[i] = fileshandles[i].read(blocksize)
# End of file for all files, we exit
if entries.count('') == len(fileshandles):
break
# Else if there's only one file, just copy the file's content over
elif len(entries) == 1:
final_entry = entries[0]
# Else, do the majority vote
else:
# Walk along each column (imagine the strings being rows in a matrix, then we pick one column at each iteration = all characters at position i of each string), so that we can compare these characters easily
for i in xrange(max(len(entry) for entry in entries)):
hist = {} # kind of histogram, we just memorize how many times each character appears at position i across all strings. TODO: use collections.Counter instead of dict()?
# Extract the character at position i of each string and compute the histogram at the same time (number of times this character appears among all strings at this position i)
for entry in entries:
# Check if we are not beyond the current entry's length
if i < len(entry): # TODO: check this line, this should allow the vote to continue even if some files are shorter than others
# Extract the character and use it to contribute to the histogram
# TODO: add warning message when one file is not of the same size as the others
key = str(ord(entry[i])) # convert to the ascii value to avoid any funky problem with encoding in dict keys
hist[key] = hist.get(key, 0) + 1 # increment histogram for this value. If it does not exist yet, start from 0. (essentially equivalent to hist[key] += 1 but without raising an exception if the key does not already exist)
# If there's only one character (it's the same across all strings at position i), then it's an exact match, we just save the character and we can skip to the next iteration
if len(hist) == 1:
final_entry.append(chr(int(hist.iterkeys().next())))
continue
# Else, the character is different among different entries, we will pick the major one (mode)
elif len(hist) > 1:
# Sort the dict by value (and reverse because we want the most frequent first)
skeys = sorted(hist, key=hist.get, reverse=True)
# Ambiguity! If each entry presents a different character (thus the most frequent one occurs only once), then it's too ambiguous and we just set a null byte to signal that
if hist[skeys[0]] == 1:
if default_char_null:
if default_char_null is True:
final_entry.append("\x00")
else:
final_entry.append(default_char_null)
else:
# Use the entry of the first file that is still open
first_char = ''
for entry in entries:
# Found the first file that has a character at this position: store it and break loop
if i < len(entry):
first_char = entry[i]
break
# Use this character in spite of ambiguity
final_entry.append(first_char)
errors.append(outfile.tell() + i) # Remember the position of the ambiguous character so that it can be reported at the end
# Else if there is a tie (at least two characters appear with the same frequency), then we just pick one of them
elif hist[skeys[0]] == hist[skeys[1]]:
final_entry.append(chr(int(skeys[0]))) # TODO: find a way to account for both characters. Maybe return two different strings that will both have to be tested? (eg: maybe one has a tampered hash, both will be tested and if one correction pass the hash then it's ok we found the correct one)
# Else we have a clear major character that appear in more entries than any other character, then we keep this one
else:
final_entry.append(chr(int(skeys[0]))) # alternative one-liner: max(hist.iteritems(), key=operator.itemgetter(1))[0]
continue
# Concatenate to a string (this is faster than using a string from the start and concatenating at each iteration, because Python strings are immutable so Python would have to copy over the whole string each time, which is O(n^2))
final_entry = ''.join(final_entry)
# Commit to output file
outfile.write(final_entry)
outfile.flush()
# Errors signaling
if errors:
error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors]) # Signal to user that this file has unrecoverable corruptions (he may try to fix the bits manually or with his own script)
return (1, error_msg) # return an error
# Close all input files
for fh in fileshandles:
fh.close()
# Close output file
if outfile != outpath: # close only if we were not given a file handle in the first place
outfile.flush()
outfile.close()
return (0, None) | python | def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
'''Takes a list of files in string format representing the same data, and disambiguates them by majority vote: for each position in the string, if the character is not the same across all entries, we keep the most frequent one. If there is no majority, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the file path relative to the parent directory (ie, the relative path, so that we can compare the files from several directories).'''
# The idea of replication combined with ECC was a bit inspired by this paper: Friedman, Roy, Yoav Kantor, and Amir Kantor. "Combining Erasure-Code and Replication Redundancy Schemes for Increased Storage and Repair Efficiency in P2P Storage Systems.", 2013, Technion, Computer Science Department, Technical Report CS-2013-03
# But it is a very well known concept in redundancy engineering, usually called triple-modular redundancy (which is here extended to n-modular since we can supply any number of files we want, not just three).
# Preference in case of ambiguity is always given to the file of the first folder.
fileshandles = []
for filepath in fileslist:
if filepath:
# Already a file handle? Just store it in the fileshandles list
if hasattr(filepath, 'read'):
fileshandles.append(filepath)
# Else it's a string filepath, open the file
else:
fileshandles.append(open(filepath, 'rb'))
# Create and open output (merged) file, except if we were already given a file handle
if hasattr(outpath, 'write'):
outfile = outpath
else:
outpathfull = os.path.join(outpath, relfilepath)
pardir = os.path.dirname(outpathfull)
if not os.path.exists(pardir):
os.makedirs(pardir)
outfile = open(outpathfull, 'wb')
# Cannot vote if there's not at least 3 files!
# In this case, just copy the file from the first folder, verbatim
if len(fileshandles) < 3:
# If there's at least one input file, then copy it verbatim to the output folder
if fileshandles:
create_dir_if_not_exist(os.path.dirname(outpathfull))
buf = 1
while (buf):
buf = fileshandles[0].read()
outfile.write(buf)
outfile.flush()
return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
errors = []
entries = [1]*len(fileshandles) # init with a non-empty placeholder so that the while loop starts
while (entries.count('') < len(fileshandles)):
final_entry = []
# Read a block from all input files into memory
for i in xrange(len(fileshandles)):
entries[i] = fileshandles[i].read(blocksize)
# End of file for all files, we exit
if entries.count('') == len(fileshandles):
break
# Else if there's only one file, just copy the file's content over
elif len(entries) == 1:
final_entry = entries[0]
# Else, do the majority vote
else:
# Walk along each column (imagine the strings being rows in a matrix, then we pick one column at each iteration = all characters at position i of each string), so that we can compare these characters easily
for i in xrange(max(len(entry) for entry in entries)):
hist = {} # kind of histogram, we just memorize how many times each character appears at position i across all strings. TODO: use collections.Counter instead of dict()?
# Extract the character at position i of each string and compute the histogram at the same time (number of times this character appears among all strings at this position i)
for entry in entries:
# Check if we are not beyond the current entry's length
if i < len(entry): # TODO: check this line, this should allow the vote to continue even if some files are shorter than others
# Extract the character and use it to contribute to the histogram
# TODO: add warning message when one file is not of the same size as the others
key = str(ord(entry[i])) # convert to the ascii value to avoid any funky problem with encoding in dict keys
hist[key] = hist.get(key, 0) + 1 # increment histogram for this value. If it does not exist yet, start from 0. (essentially equivalent to hist[key] += 1 but without raising an exception if the key does not already exist)
# If there's only one character (it's the same across all strings at position i), then it's an exact match, we just save the character and we can skip to the next iteration
if len(hist) == 1:
final_entry.append(chr(int(hist.iterkeys().next())))
continue
# Else, the character is different among different entries, we will pick the major one (mode)
elif len(hist) > 1:
# Sort the dict by value (and reverse because we want the most frequent first)
skeys = sorted(hist, key=hist.get, reverse=True)
# Ambiguity! If each entry presents a different character (thus the most frequent one occurs only once), then it's too ambiguous and we just set a null byte to signal that
if hist[skeys[0]] == 1:
if default_char_null:
if default_char_null is True:
final_entry.append("\x00")
else:
final_entry.append(default_char_null)
else:
# Use the entry of the first file that is still open
first_char = ''
for entry in entries:
# Found the first file that has a character at this position: store it and break loop
if i < len(entry):
first_char = entry[i]
break
# Use this character in spite of ambiguity
final_entry.append(first_char)
errors.append(outfile.tell() + i) # Remember the position of the ambiguous character so that it can be reported at the end
# Else if there is a tie (at least two characters appear with the same frequency), then we just pick one of them
elif hist[skeys[0]] == hist[skeys[1]]:
final_entry.append(chr(int(skeys[0]))) # TODO: find a way to account for both characters. Maybe return two different strings that will both have to be tested? (eg: maybe one has a tampered hash, both will be tested and if one correction pass the hash then it's ok we found the correct one)
# Else we have a clear major character that appear in more entries than any other character, then we keep this one
else:
final_entry.append(chr(int(skeys[0]))) # alternative one-liner: max(hist.iteritems(), key=operator.itemgetter(1))[0]
continue
# Concatenate to a string (this is faster than using a string from the start and concatenating at each iteration, because Python strings are immutable so Python would have to copy over the whole string each time, which is O(n^2))
final_entry = ''.join(final_entry)
# Commit to output file
outfile.write(final_entry)
outfile.flush()
# Errors signaling
if errors:
error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors]) # Signal to user that this file has unrecoverable corruptions (he may try to fix the bits manually or with his own script)
return (1, error_msg) # return an error
# Close all input files
for fh in fileshandles:
fh.close()
# Close output file
if outfile != outpath: # close only if we were not given a file handle in the first place
outfile.flush()
outfile.close()
return (0, None) | Takes a list of files in string format representing the same data, and disambiguates them by majority vote: for each position in the string, if the character is not the same across all entries, we keep the most frequent one. If there is no majority, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the file path relative to the parent directory (ie, the relative path, so that we can compare the files from several directories). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L112-L230
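Stripped of all file handling, the repair rule above is plain n-modular redundancy: for every byte position, keep the value that a majority of the copies agree on. The sketch below is not the function above but a deliberately simplified voting core: it only accepts a strict majority and nulls everything else (roughly the `default_char_null=True` behaviour); the `vote` helper is hypothetical.

from collections import Counter

def vote(copies):
    # copies: equally long strings representing the same data.
    out = []
    for column in zip(*copies):
        winner, count = Counter(column).most_common(1)[0]
        out.append(winner if count * 2 > len(copies) else '\x00')  # no strict majority -> null byte
    return ''.join(out)

print(repr(vote(["hello", "hxllo", "hello"])))  # 'hello' -- the corrupted 'x' is outvoted
print(repr(vote(["abc", "xbc", "ybc"])))        # '\x00bc' -- first byte is ambiguous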
lrq3000/pyFileFixity | pyFileFixity/replication_repair.py | synchronize_files | def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False):
''' Main function to synchronize file contents by majority vote
The main job of this function is to walk through the input folders and align the files, so that we can compare every file across every folder, one by one.
The whole trick here is to align files, so that we don't need to keep all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order; this ensures the alignment of files between different folders, without memorizing the whole tree structures.
'''
# (Generator) Files Synchronization Algorithm:
# Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable.
# Until there's no file in any of the input folders to be processed:
# - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.
# - curfiles_grouped <- group curfiles_ordered:
# * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)
# * curfiles_grouped <- empty list
# * curfiles_grouped[0] = add first element in curfiles_ordered
# * last_group = 0
# * for every subsequent element nextelt in curfiles_ordered:
# . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)
# . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]
# At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.
# - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.
# - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.
# At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.
# Init files walking generator for each inputpaths
recgen = [recwalk(path, sorting=True) for path in inputpaths]
curfiles = {}
recgen_exhausted = {}
recgen_exhausted_count = 0
nbpaths = len(inputpaths)
retcode = 0
if not ptee: ptee = sys.stdout
# Open report file and write header
if report_file is not None:
rfile = open(report_file, 'wb')
r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"')
r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"]
r_length = len(r_header)
r_writer.writerow(r_header)
# Initialization: load the first batch of files, one for each folder
for i in xrange(len(recgen)):
recgen_exhausted[i] = False
try:
if curfiles.get(i, None) is None:
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
except StopIteration:
recgen_exhausted[i] = True
recgen_exhausted_count += 1
# Files lists alignment loop
while recgen_exhausted_count < nbpaths:
errcode = 0
errmsg = None
# Init a new report's row
if report_file: r_row = ["-"] * r_length
# -- Group equivalent relative filepaths together
#print curfiles # debug
curfiles_grouped = sort_group(curfiles, True)
# -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)
# Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now
to_process = curfiles_grouped[0]
#print to_process # debug
# -- Byte-by-byte majority vote on the first group of files
# Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group)
relfilepath = path2unix(os.path.join(*to_process[0][1]))
if report_file: r_row[0] = relfilepath
if verbose: ptee.write("- Processing file %s." % relfilepath)
# Generate output path
outpathfull = os.path.join(outpath, relfilepath)
create_dir_if_not_exist(os.path.dirname(outpathfull))
# Initialize the list of absolute filepaths
fileslist = []
for elt in to_process:
i = elt[0]
fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1])))
if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file
# If there's only one file, just copy it over
if len(to_process) == 1:
shutil.copyfile(fileslist[0], outpathfull)
id = to_process[0][0]
if report_file: r_row[id+1] = 'O'
# Else, merge by majority vote
else:
# Before-merge check using rfigc database, if provided
# If one of the files in the input folders is already correct, just copy it over
correct_file = None
if database:
for id, filepath in enumerate(fileslist):
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0:
correct_file = filepath
correct_id = to_process[id][0]
break
# If one correct file was found, copy it over
if correct_file:
create_dir_if_not_exist(os.path.dirname(outpathfull))
shutil.copyfile(correct_file, outpathfull)
if report_file:
r_row[correct_id+1] = "O"
r_row[-3] = "OK"
# Else, we need to do the majority vote merge
else:
# Do the majority vote merge
errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath)
# After-merge/move check using rfigc database, if provided
if database:
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1:
errcode = 1
r_row[-3] = "KO"
if not errmsg: errmsg = ''
errmsg += " File could not be totally repaired according to rfigc database."
else:
if report_file:
r_row[-3] = "OK"
if errmsg: errmsg += " But merged file is correct according to rfigc database."
# Display errors if any
if errcode:
if report_file:
r_row[-2] = "KO"
r_row[-1] = errmsg
ptee.write(errmsg)
retcode = 1
else:
if report_file: r_row[-2] = "OK"
# Save current report's row
if report_file:
r_writer.writerow(r_row)
# -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment)
for elt in to_process: # for files of the first group (the ones we processed)
i = elt[0]
# Walk their respective folders and load up the next file
try:
if not recgen_exhausted.get(i, False):
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
# If there's no file left in this folder, mark this input folder as exhausted and continue with the others
except StopIteration:
curfiles[i] = None
recgen_exhausted[i] = True
recgen_exhausted_count += 1
if tqdm_bar: tqdm_bar.update()
if tqdm_bar: tqdm_bar.close()
# Closing report file
if report_file:
# Write list of directories and legend
rfile.write("\n=> Input directories:")
for id, ipath in enumerate(inputpaths):
rfile.write("\n\t- dir%i = %s" % ((id+1), ipath))
rfile.write("\n=> Output directory: %s" % outpath)
rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n")
# Close the report file handle
rfile.close()
return retcode | python | def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False):
    ''' Main function to synchronize file contents by majority vote
    The main job of this function is to walk through the input folders and align the files, so that we can compare every file across every folder, one by one.
    The whole trick is the alignment: we never need to hold all the files in memory, yet every group of equivalent files still gets compared together. To do that, we walk through the input directories in alphabetical order and always pick the relative filepath that comes first alphabetically; this keeps the files from the different folders aligned without memorizing the whole tree structures.
'''
# (Generator) Files Synchronization Algorithm:
    # Needs a function stable_dir_walking, which will walk through directories recursively but always in the same order on all platforms (same order for files and for folders), whatever that order is, as long as it is stable.
# Until there's no file in any of the input folders to be processed:
# - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.
# - curfiles_grouped <- group curfiles_ordered:
# * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)
# * curfiles_grouped <- empty list
# * curfiles_grouped[0] = add first element in curfiles_ordered
# * last_group = 0
# * for every subsequent element nextelt in curfiles_ordered:
# . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)
# . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]
# At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.
# - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.
# - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.
# At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.
# Init files walking generator for each inputpaths
recgen = [recwalk(path, sorting=True) for path in inputpaths]
curfiles = {}
recgen_exhausted = {}
recgen_exhausted_count = 0
nbpaths = len(inputpaths)
retcode = 0
if not ptee: ptee = sys.stdout
# Open report file and write header
if report_file is not None:
rfile = open(report_file, 'wb')
r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"')
r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"]
r_length = len(r_header)
r_writer.writerow(r_header)
# Initialization: load the first batch of files, one for each folder
for i in xrange(len(recgen)):
recgen_exhausted[i] = False
try:
if curfiles.get(i, None) is None:
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
except StopIteration:
recgen_exhausted[i] = True
recgen_exhausted_count += 1
# Files lists alignment loop
while recgen_exhausted_count < nbpaths:
errcode = 0
errmsg = None
# Init a new report's row
if report_file: r_row = ["-"] * r_length
# -- Group equivalent relative filepaths together
#print curfiles # debug
curfiles_grouped = sort_group(curfiles, True)
# -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)
        # Note that the remaining files in other groups will be processed later, because they sort after the first group alphabetically; the first group is therefore the one to process now
to_process = curfiles_grouped[0]
#print to_process # debug
# -- Byte-by-byte majority vote on the first group of files
        # Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of the same group)
relfilepath = path2unix(os.path.join(*to_process[0][1]))
if report_file: r_row[0] = relfilepath
if verbose: ptee.write("- Processing file %s." % relfilepath)
# Generate output path
outpathfull = os.path.join(outpath, relfilepath)
create_dir_if_not_exist(os.path.dirname(outpathfull))
# Initialize the list of absolute filepaths
fileslist = []
for elt in to_process:
i = elt[0]
fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1])))
if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file
# If there's only one file, just copy it over
if len(to_process) == 1:
shutil.copyfile(fileslist[0], outpathfull)
id = to_process[0][0]
if report_file: r_row[id+1] = 'O'
# Else, merge by majority vote
else:
# Before-merge check using rfigc database, if provided
# If one of the files in the input folders is already correct, just copy it over
correct_file = None
if database:
for id, filepath in enumerate(fileslist):
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0:
correct_file = filepath
correct_id = to_process[id][0]
break
# If one correct file was found, copy it over
if correct_file:
create_dir_if_not_exist(os.path.dirname(outpathfull))
shutil.copyfile(correct_file, outpathfull)
if report_file:
r_row[correct_id+1] = "O"
r_row[-3] = "OK"
# Else, we need to do the majority vote merge
else:
# Do the majority vote merge
errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath)
# After-merge/move check using rfigc database, if provided
if database:
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1:
errcode = 1
r_row[-3] = "KO"
if not errmsg: errmsg = ''
errmsg += " File could not be totally repaired according to rfigc database."
else:
if report_file:
r_row[-3] = "OK"
if errmsg: errmsg += " But merged file is correct according to rfigc database."
# Display errors if any
if errcode:
if report_file:
r_row[-2] = "KO"
r_row[-1] = errmsg
ptee.write(errmsg)
retcode = 1
else:
if report_file: r_row[-2] = "OK"
# Save current report's row
if report_file:
r_writer.writerow(r_row)
        # -- Update the file list alignment (i.e., retrieve new files while trying to keep the alignment)
for elt in to_process: # for files of the first group (the ones we processed)
i = elt[0]
# Walk their respective folders and load up the next file
try:
if not recgen_exhausted.get(i, False):
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
# If there's no file left in this folder, mark this input folder as exhausted and continue with the others
except StopIteration:
curfiles[i] = None
recgen_exhausted[i] = True
recgen_exhausted_count += 1
if tqdm_bar: tqdm_bar.update()
if tqdm_bar: tqdm_bar.close()
# Closing report file
if report_file:
# Write list of directories and legend
rfile.write("\n=> Input directories:")
for id, ipath in enumerate(inputpaths):
rfile.write("\n\t- dir%i = %s" % ((id+1), ipath))
rfile.write("\n=> Output directory: %s" % outpath)
rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n")
# Close the report file handle
rfile.close()
return retcode | Main function to synchronize file contents by majority vote
The main job of this function is to walk through the input folders and align the files, so that we can compare every file across every folder, one by one.
The whole trick is the alignment: we never need to hold all the files in memory, yet every group of equivalent files still gets compared together. To do that, we walk through the input directories in alphabetical order and always pick the relative filepath that comes first alphabetically; this keeps the files from the different folders aligned without memorizing the whole tree structures. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L232-L394
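Note: the byte-level merge is delegated to majority_vote_byte_scan, which lives elsewhere in replication_repair.py and is not reproduced in this entry. As a rough, self-contained illustration of a byte-wise majority vote over several copies of a file: this is a sketch only, written for Python 3 with the standard library, and the function name and block handling are assumptions rather than pyFileFixity's actual implementation.

from collections import Counter

def majority_vote_bytes(paths, outpath, blocksize=65536):
    # Open every copy, then keep the most common byte at each offset
    # (ties are broken arbitrarily by Counter ordering).
    handles = [open(p, 'rb') for p in paths]
    try:
        with open(outpath, 'wb') as out:
            while True:
                blocks = [h.read(blocksize) for h in handles]
                longest = max(len(b) for b in blocks)
                if longest == 0:
                    break  # every copy is exhausted
                merged = bytearray()
                for i in range(longest):
                    votes = Counter(b[i] for b in blocks if i < len(b))
                    merged.append(votes.most_common(1)[0][0])
                out.write(bytes(merged))
    finally:
        for h in handles:
            h.close()

# Hypothetical usage: majority_vote_bytes(['copy1.bin', 'copy2.bin', 'copy3.bin'], 'repaired.bin')

In the real tool this vote is combined with the rfigc database checks shown above, so an already-correct copy is preferred over a merged one.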
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | RefBrowser.get_tree | def get_tree(self):
"""Get a tree of referrers of the root object."""
self.ignore.append(inspect.currentframe())
return self._get_tree(self.root, self.maxdepth) | python | def get_tree(self):
"""Get a tree of referrers of the root object."""
self.ignore.append(inspect.currentframe())
return self._get_tree(self.root, self.maxdepth) | Get a tree of referrers of the root object. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L86-L89 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | RefBrowser._get_tree | def _get_tree(self, root, maxdepth):
"""Workhorse of the get_tree implementation.
        This is a recursive method, which is why we have a wrapper method.
root is the current root object of the tree which should be returned.
Note that root is not of the type _Node.
        maxdepth defines how much further down from the root the tree
        should be built.
"""
self.ignore.append(inspect.currentframe())
res = _Node(root, self.str_func) #PYCHOK use root parameter
self.already_included.add(id(root)) #PYCHOK use root parameter
if maxdepth == 0:
return res
objects = gc.get_referrers(root) #PYCHOK use root parameter
self.ignore.append(objects)
for o in objects:
# XXX: find a better way to ignore dict of _Node objects
if isinstance(o, dict):
sampleNode = _Node(1)
if list(sampleNode.__dict__.keys()) == list(o.keys()):
continue
_id = id(o)
if not self.repeat and (_id in self.already_included):
s = self.str_func(o)
res.children.append("%s (already included, id %s)" %\
(s, _id))
continue
if (not isinstance(o, _Node)) and (o not in self.ignore):
res.children.append(self._get_tree(o, maxdepth-1))
return res | python | def _get_tree(self, root, maxdepth):
"""Workhorse of the get_tree implementation.
        This is a recursive method, which is why we have a wrapper method.
root is the current root object of the tree which should be returned.
Note that root is not of the type _Node.
        maxdepth defines how much further down from the root the tree
        should be built.
"""
self.ignore.append(inspect.currentframe())
res = _Node(root, self.str_func) #PYCHOK use root parameter
self.already_included.add(id(root)) #PYCHOK use root parameter
if maxdepth == 0:
return res
objects = gc.get_referrers(root) #PYCHOK use root parameter
self.ignore.append(objects)
for o in objects:
# XXX: find a better way to ignore dict of _Node objects
if isinstance(o, dict):
sampleNode = _Node(1)
if list(sampleNode.__dict__.keys()) == list(o.keys()):
continue
_id = id(o)
if not self.repeat and (_id in self.already_included):
s = self.str_func(o)
res.children.append("%s (already included, id %s)" %\
(s, _id))
continue
if (not isinstance(o, _Node)) and (o not in self.ignore):
res.children.append(self._get_tree(o, maxdepth-1))
return res | Workhorse of the get_tree implementation.
This is a recursive method, which is why we have a wrapper method.
root is the current root object of the tree which should be returned.
Note that root is not of the type _Node.
maxdepth defines how much further down from the root the tree
should be built. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L91-L122
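As a minimal standalone illustration of the gc.get_referrers walk that _get_tree builds on (Python 3; the helper name and the frame filtering are assumptions made for the sketch and are not part of pympler's API):

import gc
import inspect

def referrer_types(obj, maxdepth=2, _depth=0):
    # List the types of the objects that refer to `obj`, recursively,
    # stopping after `maxdepth` levels; this skips pympler's _Node
    # bookkeeping and duplicate tracking entirely.
    if _depth >= maxdepth:
        return []
    tree = []
    for ref in gc.get_referrers(obj):
        if inspect.isframe(ref):
            continue  # skip stack frames that hold obj in their locals
        tree.append((type(ref).__name__,
                     referrer_types(ref, maxdepth, _depth + 1)))
    return tree

data = [1, 2, 3]
holder = {'payload': data}
print(referrer_types(data, maxdepth=1))  # e.g. [('dict', []), ('dict', [])]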
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | StreamBrowser.print_tree | def print_tree(self, tree=None):
""" Print referrers tree to console.
keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
        based on the root object.
"""
if tree is None:
self._print(self.root, '', '')
else:
self._print(tree, '', '') | python | def print_tree(self, tree=None):
""" Print referrers tree to console.
keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
        based on the root object.
"""
if tree is None:
self._print(self.root, '', '')
else:
self._print(tree, '', '') | Print referrers tree to console.
keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
based on the root object. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L138-L149
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | StreamBrowser._print | def _print(self, tree, prefix, carryon):
"""Compute and print a new line of the tree.
This is a recursive function.
arguments
tree -- tree to print
prefix -- prefix to the current line to print
carryon -- prefix which is used to carry on the vertical lines
"""
level = prefix.count(self.cross) + prefix.count(self.vline)
len_children = 0
        if isinstance(tree, _Node):
len_children = len(tree.children)
# add vertex
prefix += str(tree)
# and as many spaces as the vertex is long
carryon += self.space * len(str(tree))
if (level == self.maxdepth) or (not isinstance(tree, _Node)) or\
(len_children == 0):
self.stream.write(prefix+'\n')
return
else:
# add in between connections
prefix += self.hline
carryon += self.space
# if there is more than one branch, add a cross
if len(tree.children) > 1:
prefix += self.cross
carryon += self.vline
prefix += self.hline
carryon += self.space
if len_children > 0:
# print the first branch (on the same line)
self._print(tree.children[0], prefix, carryon)
for b in range(1, len_children):
            # the carryon becomes the prefix for all following children
prefix = carryon[:-2] + self.cross + self.hline
# remove the vlines for any children of last branch
if b == (len_children-1):
carryon = carryon[:-2] + 2*self.space
self._print(tree.children[b], prefix, carryon)
# leave a free line before the next branch
if b == (len_children-1):
if len(carryon.strip(' ')) == 0:
return
self.stream.write(carryon[:-2].rstrip()+'\n') | python | def _print(self, tree, prefix, carryon):
"""Compute and print a new line of the tree.
This is a recursive function.
arguments
tree -- tree to print
prefix -- prefix to the current line to print
carryon -- prefix which is used to carry on the vertical lines
"""
level = prefix.count(self.cross) + prefix.count(self.vline)
len_children = 0
        if isinstance(tree, _Node):
len_children = len(tree.children)
# add vertex
prefix += str(tree)
# and as many spaces as the vertex is long
carryon += self.space * len(str(tree))
if (level == self.maxdepth) or (not isinstance(tree, _Node)) or\
(len_children == 0):
self.stream.write(prefix+'\n')
return
else:
# add in between connections
prefix += self.hline
carryon += self.space
# if there is more than one branch, add a cross
if len(tree.children) > 1:
prefix += self.cross
carryon += self.vline
prefix += self.hline
carryon += self.space
if len_children > 0:
# print the first branch (on the same line)
self._print(tree.children[0], prefix, carryon)
for b in range(1, len_children):
            # the carryon becomes the prefix for all following children
prefix = carryon[:-2] + self.cross + self.hline
# remove the vlines for any children of last branch
if b == (len_children-1):
carryon = carryon[:-2] + 2*self.space
self._print(tree.children[b], prefix, carryon)
# leave a free line before the next branch
if b == (len_children-1):
if len(carryon.strip(' ')) == 0:
return
self.stream.write(carryon[:-2].rstrip()+'\n') | Compute and print a new line of the tree.
This is a recursive function.
arguments
tree -- tree to print
prefix -- prefix to the current line to print
carryon -- prefix which is used to carry on the vertical lines | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L151-L200 |
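A compact sketch of the same prefix/carryon idea, drawn over nested (label, children) tuples rather than pympler's _Node objects (Python 3; the node shape and the drawing characters are assumptions made for the illustration):

def print_ascii_tree(node, prefix='', carryon=''):
    # prefix is everything printed before the current label; carryon is the
    # padding reused when later siblings or children wrap onto new lines.
    label, children = node
    prefix += label
    carryon += ' ' * len(label)
    if not children:
        print(prefix)
        return
    prefix += '--'
    carryon += '  '
    if len(children) > 1:  # only draw a junction when it will be reused
        prefix += '+--'
        carryon += '|  '
    # the first child continues on the same line, as _print does
    print_ascii_tree(children[0], prefix, carryon)
    for i, child in enumerate(children[1:], start=1):
        branch = carryon[:-3] + '+--'
        if i == len(children) - 1:  # last sibling: stop the vertical line
            carryon = carryon[:-3] + '   '
        print_ascii_tree(child, branch, carryon)

print_ascii_tree(('root', [('a', [('a1', []), ('a2', [])]), ('b', [])]))
# root--+--a--+--a1
#       |     +--a2
#       +--b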
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | FileBrowser.print_tree | def print_tree(self, filename, tree=None):
""" Print referrers tree to file (in text format).
keyword arguments
tree -- if not None, the passed tree will be printed.
"""
old_stream = self.stream
self.stream = open(filename, 'w')
try:
super(FileBrowser, self).print_tree(tree=tree)
finally:
self.stream.close()
self.stream = old_stream | python | def print_tree(self, filename, tree=None):
""" Print referrers tree to file (in text format).
keyword arguments
tree -- if not None, the passed tree will be printed.
"""
old_stream = self.stream
self.stream = open(filename, 'w')
try:
super(FileBrowser, self).print_tree(tree=tree)
finally:
self.stream.close()
self.stream = old_stream | Print referrers tree to file (in text format).
keyword arguments
tree -- if not None, the passed tree will be printed. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L215-L228 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/refbrowser.py | InteractiveBrowser.main | def main(self, standalone=False):
"""Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows
"""
window = _Tkinter.Tk()
sc = _TreeWidget.ScrolledCanvas(window, bg="white",\
highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = _ReferrerTreeItem(window, self.get_tree(), self)
node = _TreeNode(sc.canvas, None, item)
node.expand()
if standalone:
window.mainloop() | python | def main(self, standalone=False):
"""Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows
"""
window = _Tkinter.Tk()
sc = _TreeWidget.ScrolledCanvas(window, bg="white",\
highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = _ReferrerTreeItem(window, self.get_tree(), self)
node = _TreeNode(sc.canvas, None, item)
node.expand()
if standalone:
window.mainloop() | Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refbrowser.py#L392-L408 |
lrq3000/pyFileFixity | pyFileFixity/lib/gooey/gui/widgets/widget_pack.py | CounterPayload.getValue | def getValue(self):
'''
Returns
str(option_string * DropDown Value)
e.g.
-vvvvv
'''
dropdown_value = self.widget.GetValue()
if not str(dropdown_value).isdigit():
return ''
arg = str(self.option_string).replace('-', '')
repeated_args = arg * int(dropdown_value)
return '-' + repeated_args | python | def getValue(self):
'''
Returns
str(option_string * DropDown Value)
e.g.
-vvvvv
'''
dropdown_value = self.widget.GetValue()
if not str(dropdown_value).isdigit():
return ''
arg = str(self.option_string).replace('-', '')
repeated_args = arg * int(dropdown_value)
return '-' + repeated_args | Returns
str(option_string * DropDown Value)
e.g.
-vvvvv | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/widgets/widget_pack.py#L199-L211 |
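The same option-repetition idea as a tiny standalone helper (the function name is hypothetical; Gooey's real widget reads the count from a wx dropdown rather than taking it as an argument):

def repeat_flag(option_string, count):
    # Turn ('-v', 3) into '-vvv'; empty or non-numeric counts yield ''.
    if not str(count).isdigit():
        return ''
    letter = option_string.replace('-', '')
    return '-' + letter * int(count)

assert repeat_flag('-v', 3) == '-vvv'
assert repeat_flag('-v', '') == ''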
lrq3000/pyFileFixity | pyFileFixity/lib/tqdm/_tqdm.py | format_meter | def format_meter(n, total, elapsed, ncols=None, prefix='',
unit=None, unit_scale=False, ascii=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If None, only basic progress
statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
        The width of the entire output message. If specified, dynamically
resizes the progress meter [default: None]. The fallback meter
width is 10.
prefix : str, optional
Prefix message (included in total width).
unit : str, optional
String that will be used to define the unit of each iteration.
[default: "it"]
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.). [default: False]
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# in case the total is wrong (n is above the total), then
# we switch to the mode without showing the total prediction
# (since ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
if elapsed:
if unit_scale:
rate = format_sizeof(n / elapsed, suffix='')
else:
rate = '{0:5.2f}'.format(n / elapsed)
else:
rate = '?'
rate_unit = unit if unit else 'it'
if not unit:
unit = ''
n_fmt = str(n)
total_fmt = str(total)
if unit_scale:
n_fmt = format_sizeof(n, suffix='')
if total:
total_fmt = format_sizeof(total, suffix='')
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = '{1}{0:.0f}%|'.format(percentage, prefix) if prefix else \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1}{2} [{3}<{4}, {5} {6}/s]'.format(
n_fmt, total_fmt, unit, elapsed_str, remaining_str,
rate, rate_unit)
if ncols == 0:
bar = ''
else:
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#'*bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588)*bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0) # spacing
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0) # spacing
return l_bar + full_bar + r_bar
else: # no progressbar nor ETA, just progress statistics
return '{0}{1} [{2}, {3} {4}/s]'.format(
n_fmt, unit, elapsed_str, rate, rate_unit) | python | def format_meter(n, total, elapsed, ncols=None, prefix='',
unit=None, unit_scale=False, ascii=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If None, only basic progress
statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
        The width of the entire output message. If specified, dynamically
resizes the progress meter [default: None]. The fallback meter
width is 10.
prefix : str, optional
Prefix message (included in total width).
unit : str, optional
String that will be used to define the unit of each iteration.
[default: "it"]
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.). [default: False]
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# in case the total is wrong (n is above the total), then
# we switch to the mode without showing the total prediction
# (since ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
if elapsed:
if unit_scale:
rate = format_sizeof(n / elapsed, suffix='')
else:
rate = '{0:5.2f}'.format(n / elapsed)
else:
rate = '?'
rate_unit = unit if unit else 'it'
if not unit:
unit = ''
n_fmt = str(n)
total_fmt = str(total)
if unit_scale:
n_fmt = format_sizeof(n, suffix='')
if total:
total_fmt = format_sizeof(total, suffix='')
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = '{1}{0:.0f}%|'.format(percentage, prefix) if prefix else \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1}{2} [{3}<{4}, {5} {6}/s]'.format(
n_fmt, total_fmt, unit, elapsed_str, remaining_str,
rate, rate_unit)
if ncols == 0:
bar = ''
else:
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#'*bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588)*bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0) # spacing
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0) # spacing
return l_bar + full_bar + r_bar
else: # no progressbar nor ETA, just progress statistics
return '{0}{1} [{2}, {3} {4}/s]'.format(
n_fmt, unit, elapsed_str, rate, rate_unit) | Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If None, only basic progress
statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter [default: None]. The fallback meter
width is 10.
prefix : str, optional
Prefix message (included in total width).
unit : str, optional
String that will be used to define the unit of each iteration.
[default: "it"]
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.). [default: False]
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
Returns
-------
out : Formatted meter and stats, ready to display. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/tqdm/_tqdm.py#L44-L151 |
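A much-reduced sketch of the same formatting (percentage, bar fill, rate and ETA) that stays independent of tqdm's internals and unicode handling; the helper name and the '#'/'-' fill characters are choices made for the illustration, not tqdm's:

def simple_meter(n, total, elapsed, width=10):
    # Format 'NN%|#####-----| n/total [rate it/s, ETA Ns]' for a known total.
    frac = n / total
    filled = int(frac * width)
    bar = '#' * filled + '-' * (width - filled)
    rate = n / elapsed if elapsed else float('inf')
    eta = (total - n) / rate if rate else float('inf')
    return '{0:3.0f}%|{1}| {2}/{3} [{4:5.2f} it/s, ETA {5:.0f}s]'.format(
        frac * 100, bar, n, total, rate, eta)

print(simple_meter(30, 120, 6.0))  # ' 25%|##--------| 30/120 [ 5.00 it/s, ETA 18s]'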
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | getIcon | def getIcon( data ):
"""Return the data from the resource as a wxIcon"""
import cStringIO
stream = cStringIO.StringIO(data)
image = wx.ImageFromStream(stream)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(wx.BitmapFromImage(image))
return icon | python | def getIcon( data ):
"""Return the data from the resource as a wxIcon"""
import cStringIO
stream = cStringIO.StringIO(data)
image = wx.ImageFromStream(stream)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(wx.BitmapFromImage(image))
return icon | Return the data from the resource as a wxIcon | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L841-L848 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | main | def main():
"""Mainloop for the application"""
logging.basicConfig(level=logging.INFO)
app = RunSnakeRunApp(0)
app.MainLoop() | python | def main():
"""Mainloop for the application"""
logging.basicConfig(level=logging.INFO)
app = RunSnakeRunApp(0)
app.MainLoop() | Mainloop for the application | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L873-L877 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.CreateControls | def CreateControls(self, config_parser):
"""Create our sub-controls"""
self.CreateMenuBar()
self.SetupToolBar()
self.CreateStatusBar()
self.leftSplitter = wx.SplitterWindow(
self
)
self.rightSplitter = wx.SplitterWindow(
self.leftSplitter
)
self.listControl = listviews.DataView(
self.leftSplitter,
columns = PROFILE_VIEW_COLUMNS,
name='mainlist',
)
self.squareMap = squaremap.SquareMap(
self.rightSplitter,
padding = 6,
labels = True,
adapter = self.adapter,
square_style = True,
)
self.tabs = wx.Notebook(
self.rightSplitter,
)
self.CreateSourceWindow(self.tabs)
self.calleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='callee',
)
self.allCalleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcallee',
)
self.allCallerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcaller',
)
self.callerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='caller',
)
self.ProfileListControls = [
self.listControl,
self.calleeListControl,
self.allCalleeListControl,
self.callerListControl,
self.allCallerListControl,
]
self.tabs.AddPage(self.calleeListControl, _('Callees'), True)
self.tabs.AddPage(self.allCalleeListControl, _('All Callees'), False)
self.tabs.AddPage(self.callerListControl, _('Callers'), False)
self.tabs.AddPage(self.allCallerListControl, _('All Callers'), False)
if editor:
self.tabs.AddPage(self.sourceCodeControl, _('Source Code'), False)
self.rightSplitter.SetSashSize(10)
# calculate size as proportional value for initial display...
self.LoadState( config_parser )
width, height = self.GetSizeTuple()
rightsplit = 2 * (height // 3)
leftsplit = width // 3
self.rightSplitter.SplitHorizontally(self.squareMap, self.tabs,
rightsplit)
self.leftSplitter.SplitVertically(self.listControl, self.rightSplitter,
leftsplit)
squaremap.EVT_SQUARE_HIGHLIGHTED(self.squareMap,
self.OnSquareHighlightedMap)
squaremap.EVT_SQUARE_SELECTED(self.listControl,
self.OnSquareSelectedList)
squaremap.EVT_SQUARE_SELECTED(self.squareMap, self.OnSquareSelectedMap)
squaremap.EVT_SQUARE_ACTIVATED(self.squareMap, self.OnNodeActivated)
for control in self.ProfileListControls:
squaremap.EVT_SQUARE_ACTIVATED(control, self.OnNodeActivated)
squaremap.EVT_SQUARE_HIGHLIGHTED(control,
self.OnSquareHighlightedList)
self.moreSquareViewItem.Check(self.squareMap.square_style) | python | def CreateControls(self, config_parser):
"""Create our sub-controls"""
self.CreateMenuBar()
self.SetupToolBar()
self.CreateStatusBar()
self.leftSplitter = wx.SplitterWindow(
self
)
self.rightSplitter = wx.SplitterWindow(
self.leftSplitter
)
self.listControl = listviews.DataView(
self.leftSplitter,
columns = PROFILE_VIEW_COLUMNS,
name='mainlist',
)
self.squareMap = squaremap.SquareMap(
self.rightSplitter,
padding = 6,
labels = True,
adapter = self.adapter,
square_style = True,
)
self.tabs = wx.Notebook(
self.rightSplitter,
)
self.CreateSourceWindow(self.tabs)
self.calleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='callee',
)
self.allCalleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcallee',
)
self.allCallerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcaller',
)
self.callerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='caller',
)
self.ProfileListControls = [
self.listControl,
self.calleeListControl,
self.allCalleeListControl,
self.callerListControl,
self.allCallerListControl,
]
self.tabs.AddPage(self.calleeListControl, _('Callees'), True)
self.tabs.AddPage(self.allCalleeListControl, _('All Callees'), False)
self.tabs.AddPage(self.callerListControl, _('Callers'), False)
self.tabs.AddPage(self.allCallerListControl, _('All Callers'), False)
if editor:
self.tabs.AddPage(self.sourceCodeControl, _('Source Code'), False)
self.rightSplitter.SetSashSize(10)
# calculate size as proportional value for initial display...
self.LoadState( config_parser )
width, height = self.GetSizeTuple()
rightsplit = 2 * (height // 3)
leftsplit = width // 3
self.rightSplitter.SplitHorizontally(self.squareMap, self.tabs,
rightsplit)
self.leftSplitter.SplitVertically(self.listControl, self.rightSplitter,
leftsplit)
squaremap.EVT_SQUARE_HIGHLIGHTED(self.squareMap,
self.OnSquareHighlightedMap)
squaremap.EVT_SQUARE_SELECTED(self.listControl,
self.OnSquareSelectedList)
squaremap.EVT_SQUARE_SELECTED(self.squareMap, self.OnSquareSelectedMap)
squaremap.EVT_SQUARE_ACTIVATED(self.squareMap, self.OnNodeActivated)
for control in self.ProfileListControls:
squaremap.EVT_SQUARE_ACTIVATED(control, self.OnNodeActivated)
squaremap.EVT_SQUARE_HIGHLIGHTED(control,
self.OnSquareHighlightedList)
self.moreSquareViewItem.Check(self.squareMap.square_style) | Create our sub-controls | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L229-L311 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.CreateMenuBar | def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
# self.packageMenuItem = menu.AppendCheckItem(
# ID_PACKAGE_VIEW, _('&File View'),
# _('View time spent by package/module')
# )
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
# wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
self.viewTypeMenu =wx.Menu( )
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)
wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle) | python | def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
# self.packageMenuItem = menu.AppendCheckItem(
# ID_PACKAGE_VIEW, _('&File View'),
# _('View time spent by package/module')
# )
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
# wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
self.viewTypeMenu =wx.Menu( )
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)
wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle) | Create our menu-bar for triggering operations | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L313-L373 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.CreateSourceWindow | def CreateSourceWindow(self, tabs):
"""Create our source-view window for tabs"""
if editor and self.sourceCodeControl is None:
self.sourceCodeControl = wx.py.editwindow.EditWindow(
self.tabs, -1
)
self.sourceCodeControl.SetText(u"")
self.sourceFileShown = None
self.sourceCodeControl.setDisplayLineNumbers(True) | python | def CreateSourceWindow(self, tabs):
"""Create our source-view window for tabs"""
if editor and self.sourceCodeControl is None:
self.sourceCodeControl = wx.py.editwindow.EditWindow(
self.tabs, -1
)
self.sourceCodeControl.SetText(u"")
self.sourceFileShown = None
self.sourceCodeControl.setDisplayLineNumbers(True) | Create our source-view window for tabs | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L383-L391 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SetupToolBar | def SetupToolBar(self):
"""Create the toolbar for common actions"""
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,
tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
longHelp="Open a (c)Profile trace file")
if not osx:
tb.AddSeparator()
# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
ID_ROOT_VIEW, _("Root View"),
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
ID_BACK_VIEW, _("Back"),
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
ID_UP_VIEW, _("Up"),
wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
tb.AddSeparator()
# TODO: figure out why the control is sizing the label incorrectly on Linux
self.percentageViewTool = wx.CheckBox(tb, -1, _("Percent "))
self.percentageViewTool.SetToolTip(wx.ToolTip(
_("Toggle display of percentages in list views")))
tb.AddControl(self.percentageViewTool)
wx.EVT_CHECKBOX(self.percentageViewTool,
self.percentageViewTool.GetId(), self.OnPercentageView)
self.viewTypeTool= wx.Choice( tb, -1, choices= getattr(self.loader,'ROOTS',[]) )
self.viewTypeTool.SetToolTip(wx.ToolTip(
_("Switch between different hierarchic views of the data")))
wx.EVT_CHOICE( self.viewTypeTool, self.viewTypeTool.GetId(), self.OnViewTypeTool )
tb.AddControl( self.viewTypeTool )
tb.Realize() | python | def SetupToolBar(self):
"""Create the toolbar for common actions"""
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,
tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
longHelp="Open a (c)Profile trace file")
if not osx:
tb.AddSeparator()
# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
ID_ROOT_VIEW, _("Root View"),
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
ID_BACK_VIEW, _("Back"),
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
ID_UP_VIEW, _("Up"),
wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
tb.AddSeparator()
# TODO: figure out why the control is sizing the label incorrectly on Linux
self.percentageViewTool = wx.CheckBox(tb, -1, _("Percent "))
self.percentageViewTool.SetToolTip(wx.ToolTip(
_("Toggle display of percentages in list views")))
tb.AddControl(self.percentageViewTool)
wx.EVT_CHECKBOX(self.percentageViewTool,
self.percentageViewTool.GetId(), self.OnPercentageView)
self.viewTypeTool= wx.Choice( tb, -1, choices= getattr(self.loader,'ROOTS',[]) )
self.viewTypeTool.SetToolTip(wx.ToolTip(
_("Switch between different hierarchic views of the data")))
wx.EVT_CHOICE( self.viewTypeTool, self.viewTypeTool.GetId(), self.OnViewTypeTool )
tb.AddControl( self.viewTypeTool )
tb.Realize() | Create the toolbar for common actions | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L393-L435 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnViewTypeTool | def OnViewTypeTool( self, event ):
"""When the user changes the selection, make that our selection"""
new = self.viewTypeTool.GetStringSelection()
if new != self.viewType:
self.viewType = new
self.OnRootView( event ) | python | def OnViewTypeTool( self, event ):
"""When the user changes the selection, make that our selection"""
new = self.viewTypeTool.GetStringSelection()
if new != self.viewType:
self.viewType = new
self.OnRootView( event ) | When the user changes the selection, make that our selection | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L437-L442 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.ConfigureViewTypeChoices | def ConfigureViewTypeChoices( self, event=None ):
"""Configure the set of View types in the toolbar (and menus)"""
self.viewTypeTool.SetItems( getattr( self.loader, 'ROOTS', [] ))
if self.loader and self.viewType in self.loader.ROOTS:
self.viewTypeTool.SetSelection( self.loader.ROOTS.index( self.viewType ))
# configure the menu with the available choices...
def chooser( typ ):
def Callback( event ):
if typ != self.viewType:
self.viewType = typ
self.OnRootView( event )
return Callback
# Clear all previous items
for item in self.viewTypeMenu.GetMenuItems():
self.viewTypeMenu.DeleteItem( item )
if self.loader and self.loader.ROOTS:
for root in self.loader.ROOTS:
item = wx.MenuItem(
self.viewTypeMenu, -1, root.title(),
_("View hierarchy by %(name)s")%{
'name': root.title(),
},
kind=wx.ITEM_RADIO,
)
item.SetCheckable( True )
self.viewTypeMenu.AppendItem( item )
item.Check( root == self.viewType )
wx.EVT_MENU( self, item.GetId(), chooser( root )) | python | def ConfigureViewTypeChoices( self, event=None ):
"""Configure the set of View types in the toolbar (and menus)"""
self.viewTypeTool.SetItems( getattr( self.loader, 'ROOTS', [] ))
if self.loader and self.viewType in self.loader.ROOTS:
self.viewTypeTool.SetSelection( self.loader.ROOTS.index( self.viewType ))
# configure the menu with the available choices...
def chooser( typ ):
def Callback( event ):
if typ != self.viewType:
self.viewType = typ
self.OnRootView( event )
return Callback
# Clear all previous items
for item in self.viewTypeMenu.GetMenuItems():
self.viewTypeMenu.DeleteItem( item )
if self.loader and self.loader.ROOTS:
for root in self.loader.ROOTS:
item = wx.MenuItem(
self.viewTypeMenu, -1, root.title(),
_("View hierarchy by %(name)s")%{
'name': root.title(),
},
kind=wx.ITEM_RADIO,
)
item.SetCheckable( True )
self.viewTypeMenu.AppendItem( item )
item.Check( root == self.viewType )
wx.EVT_MENU( self, item.GetId(), chooser( root )) | Configure the set of View types in the toolbar (and menus) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L444-L472 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnOpenFile | def OnOpenFile(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN|wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load(*paths)
else:
self.load(*paths) | python | def OnOpenFile(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN|wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load(*paths)
else:
self.load(*paths) | Request to open a new profile file | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L474-L485 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnOpenMemory | def OnOpenMemory(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load_memory(path)
else:
self.load_memory(path) | python | def OnOpenMemory(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load_memory(path)
else:
self.load_memory(path) | Request to open a new Meliae memory-dump file | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L486-L497
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SetPackageView | def SetPackageView(self, directoryView):
"""Set whether to use directory/package based view"""
self.directoryView = not self.directoryView
self.packageMenuItem.Check(self.directoryView)
self.packageViewTool.SetValue(self.directoryView)
if self.loader:
self.SetModel(self.loader)
self.RecordHistory() | python | def SetPackageView(self, directoryView):
"""Set whether to use directory/package based view"""
self.directoryView = not self.directoryView
self.packageMenuItem.Check(self.directoryView)
self.packageViewTool.SetValue(self.directoryView)
if self.loader:
self.SetModel(self.loader)
self.RecordHistory() | Set whether to use directory/package based view | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L519-L526 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SetPercentageView | def SetPercentageView(self, percentageView):
"""Set whether to display percentage or absolute values"""
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
self.percentageViewTool.SetValue(self.percentageView)
total = self.adapter.value( self.loader.get_root( self.viewType ) )
for control in self.ProfileListControls:
control.SetPercentage(self.percentageView, total)
self.adapter.SetPercentage(self.percentageView, total) | python | def SetPercentageView(self, percentageView):
"""Set whether to display percentage or absolute values"""
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
self.percentageViewTool.SetValue(self.percentageView)
total = self.adapter.value( self.loader.get_root( self.viewType ) )
for control in self.ProfileListControls:
control.SetPercentage(self.percentageView, total)
self.adapter.SetPercentage(self.percentageView, total) | Set whether to display percentage or absolute values | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L532-L540 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnUpView | def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node')) | python | def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node')) | Request to move up the hierarchy to highest-weight parent | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L542-L564 |
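The navigation above boils down to choosing the parent for which the adapter reports the largest value; a tiny standalone sketch of that selection (the value callable and the toy data are stand-ins, not RunSnakeRun's adapter API):

def best_parent(node, parents, value):
    # Return the parent maximizing value(node, parent), or None if there are no parents.
    if not parents:
        return None
    return max(parents, key=lambda parent: value(node, parent))

value = lambda node, parent: len(set(node) & set(parent))  # toy metric
print(best_parent({'a', 'b'}, [{'a'}, {'a', 'b', 'c'}], value))  # picks the second parent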
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnBackView | def OnBackView(self, event):
"""Request to move backward in the history"""
self.historyIndex -= 1
try:
self.RestoreHistory(self.history[self.historyIndex])
except IndexError, err:
self.SetStatusText(_('No further history available')) | python | def OnBackView(self, event):
"""Request to move backward in the history"""
self.historyIndex -= 1
try:
self.RestoreHistory(self.history[self.historyIndex])
except IndexError, err:
self.SetStatusText(_('No further history available')) | Request to move backward in the history | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L566-L572 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnRootView | def OnRootView(self, event):
"""Reset view to the root of the tree"""
self.adapter, tree, rows = self.RootNode()
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
self.ConfigureViewTypeChoices() | python | def OnRootView(self, event):
"""Reset view to the root of the tree"""
self.adapter, tree, rows = self.RootNode()
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
self.ConfigureViewTypeChoices() | Reset view to the root of the tree | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L574-L579 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnNodeActivated | def OnNodeActivated(self, event):
"""Double-click or enter on a node in some control..."""
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected( event.node )
if editor:
if self.SourceShowFile(event.node):
if hasattr(event.node,'lineno'):
self.sourceCodeControl.GotoLine(event.node.lineno)
self.RecordHistory() | python | def OnNodeActivated(self, event):
"""Double-click or enter on a node in some control..."""
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected( event.node )
if editor:
if self.SourceShowFile(event.node):
if hasattr(event.node,'lineno'):
self.sourceCodeControl.GotoLine(event.node.lineno)
self.RecordHistory() | Double-click or enter on a node in some control... | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L581-L590 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SourceShowFile | def SourceShowFile(self, node):
"""Show the given file in the source-code view (attempt it anyway)"""
filename = self.adapter.filename( node )
if filename and self.sourceFileShown != filename:
try:
data = open(filename).read()
except Exception, err:
# TODO: load from zips/eggs? What about .pyc issues?
return None
else:
#self.sourceCodeControl.setText(data)
self.sourceCodeControl.ClearAll()
self.sourceCodeControl.AppendText( data )
return filename | python | def SourceShowFile(self, node):
"""Show the given file in the source-code view (attempt it anyway)"""
filename = self.adapter.filename( node )
if filename and self.sourceFileShown != filename:
try:
data = open(filename).read()
except Exception, err:
# TODO: load from zips/eggs? What about .pyc issues?
return None
else:
#self.sourceCodeControl.setText(data)
self.sourceCodeControl.ClearAll()
self.sourceCodeControl.AppendText( data )
return filename | Show the given file in the source-code view (attempt it anyway) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L592-L605 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnSquareSelected | def OnSquareSelected(self, event):
"""Update all views to show selection children/parents"""
self.selected_node = event.node
self.calleeListControl.integrateRecords(self.adapter.children( event.node) )
self.callerListControl.integrateRecords(self.adapter.parents( event.node) ) | python | def OnSquareSelected(self, event):
"""Update all views to show selection children/parents"""
self.selected_node = event.node
self.calleeListControl.integrateRecords(self.adapter.children( event.node) )
self.callerListControl.integrateRecords(self.adapter.parents( event.node) ) | Update all views to show selection children/parents | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L629-L633 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.OnMoreSquareToggle | def OnMoreSquareToggle( self, event ):
"""Toggle the more-square view (better looking, but more likely to filter records)"""
self.squareMap.square_style = not self.squareMap.square_style
self.squareMap.Refresh()
self.moreSquareViewItem.Check(self.squareMap.square_style) | python | def OnMoreSquareToggle( self, event ):
"""Toggle the more-square view (better looking, but more likely to filter records)"""
self.squareMap.square_style = not self.squareMap.square_style
self.squareMap.Refresh()
self.moreSquareViewItem.Check(self.squareMap.square_style) | Toggle the more-square view (better looking, but more likely to filter records) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L637-L641 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.RecordHistory | def RecordHistory(self):
"""Add the given node to the history-set"""
if not self.restoringHistory:
record = self.activated_node
if self.historyIndex < -1:
try:
del self.history[self.historyIndex+1:]
except AttributeError, err:
pass
if (not self.history) or record != self.history[-1]:
self.history.append(record)
del self.history[:-200]
self.historyIndex = -1 | python | def RecordHistory(self):
"""Add the given node to the history-set"""
if not self.restoringHistory:
record = self.activated_node
if self.historyIndex < -1:
try:
del self.history[self.historyIndex+1:]
except AttributeError, err:
pass
if (not self.history) or record != self.history[-1]:
self.history.append(record)
del self.history[:-200]
self.historyIndex = -1 | Add the given node to the history-set | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L645-L657 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.load | def load(self, *filenames):
"""Load our dataset (iteratively)"""
if len(filenames) == 1:
if os.path.basename( filenames[0] ) == 'index.coldshot':
return self.load_coldshot( os.path.dirname( filenames[0]) )
elif os.path.isdir( filenames[0] ):
return self.load_coldshot( filenames[0] )
try:
self.loader = pstatsloader.PStatsLoader(*filenames)
self.ConfigureViewTypeChoices()
self.SetModel( self.loader )
self.viewType = self.loader.ROOTS[0]
self.SetTitle(_("Run Snake Run: %(filenames)s")
% {'filenames': ', '.join(filenames)[:120]})
except (IOError, OSError, ValueError,MemoryError), err:
self.SetStatusText(
_('Failure during load of %(filenames)s: %(err)s'
) % dict(
filenames=" ".join([repr(x) for x in filenames]),
err=err
)) | python | def load(self, *filenames):
"""Load our dataset (iteratively)"""
if len(filenames) == 1:
if os.path.basename( filenames[0] ) == 'index.coldshot':
return self.load_coldshot( os.path.dirname( filenames[0]) )
elif os.path.isdir( filenames[0] ):
return self.load_coldshot( filenames[0] )
try:
self.loader = pstatsloader.PStatsLoader(*filenames)
self.ConfigureViewTypeChoices()
self.SetModel( self.loader )
self.viewType = self.loader.ROOTS[0]
self.SetTitle(_("Run Snake Run: %(filenames)s")
% {'filenames': ', '.join(filenames)[:120]})
except (IOError, OSError, ValueError,MemoryError), err:
self.SetStatusText(
_('Failure during load of %(filenames)s: %(err)s'
) % dict(
filenames=" ".join([repr(x) for x in filenames]),
err=err
)) | Load our dataset (iteratively) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L673-L693 |
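For context, the loader above expects standard pstats dump files. A minimal sketch (not from the original project) of producing such a file with the standard-library cProfile module; the function and the file name "example.profile" are only illustrations:

import cProfile

def work():
    return sum(i * i for i in range(100000))

# Writes a pstats-format dump that MainFrame.load('example.profile') could
# then hand to pstatsloader.PStatsLoader.
cProfile.run('work()', 'example.profile')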
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SetModel | def SetModel(self, loader):
"""Set our overall model (a loader object) and populate sub-controls"""
self.loader = loader
self.adapter, tree, rows = self.RootNode()
self.listControl.integrateRecords(rows.values())
self.activated_node = tree
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory() | python | def SetModel(self, loader):
"""Set our overall model (a loader object) and populate sub-controls"""
self.loader = loader
self.adapter, tree, rows = self.RootNode()
self.listControl.integrateRecords(rows.values())
self.activated_node = tree
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory() | Set our overall model (a loader object) and populate sub-controls | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L710-L717 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.RootNode | def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows | python | def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows | Return our current root node and appropriate adapter for it | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L719-L727 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.SaveState | def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', str(size[0]) )
config_parser.set( 'window', 'height', str(size[1]) )
config_parser.set( 'window', 'x', str(position[0]) )
config_parser.set( 'window', 'y', str(position[1]) )
for control in self.ProfileListControls:
control.SaveState( config_parser )
return config_parser | python | def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', str(size[0]) )
config_parser.set( 'window', 'height', str(size[1]) )
config_parser.set( 'window', 'x', str(position[0]) )
config_parser.set( 'window', 'y', str(position[1]) )
for control in self.ProfileListControls:
control.SaveState( config_parser )
return config_parser | Retrieve window state to be restored on the next run... | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L729-L747 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MainFrame.LoadState | def LoadState( self, config_parser ):
"""Set our window state from the given config_parser instance"""
if not config_parser:
return
if (
not config_parser.has_section( 'window' ) or (
config_parser.has_option( 'window','maximized' ) and
config_parser.getboolean( 'window', 'maximized' )
)
):
self.Maximize(True)
try:
width,height,x,y = [
config_parser.getint( 'window',key )
for key in ['width','height','x','y']
]
self.SetPosition( (x,y))
self.SetSize( (width,height))
except ConfigParser.NoSectionError, err:
# the file isn't written yet, so don't even warn...
pass
except Exception, err:
# this is just convenience, if it breaks in *any* way, ignore it...
log.error(
"Unable to load window preferences, ignoring: %s", traceback.format_exc()
)
try:
font_size = config_parser.getint('window', 'font_size')
except Exception:
pass # use the default, by default
else:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font_size)
for ctrl in self.ProfileListControls:
ctrl.SetFont(font)
for control in self.ProfileListControls:
control.LoadState( config_parser )
self.config = config_parser
wx.EVT_CLOSE( self, self.OnCloseWindow ) | python | def LoadState( self, config_parser ):
"""Set our window state from the given config_parser instance"""
if not config_parser:
return
if (
not config_parser.has_section( 'window' ) or (
config_parser.has_option( 'window','maximized' ) and
config_parser.getboolean( 'window', 'maximized' )
)
):
self.Maximize(True)
try:
width,height,x,y = [
config_parser.getint( 'window',key )
for key in ['width','height','x','y']
]
self.SetPosition( (x,y))
self.SetSize( (width,height))
except ConfigParser.NoSectionError, err:
# the file isn't written yet, so don't even warn...
pass
except Exception, err:
# this is just convenience, if it breaks in *any* way, ignore it...
log.error(
"Unable to load window preferences, ignoring: %s", traceback.format_exc()
)
try:
font_size = config_parser.getint('window', 'font_size')
except Exception:
pass # use the default, by default
else:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font_size)
for ctrl in self.ProfileListControls:
ctrl.SetFont(font)
for control in self.ProfileListControls:
control.LoadState( config_parser )
self.config = config_parser
wx.EVT_CLOSE( self, self.OnCloseWindow ) | Set our window state from the given config_parser instance | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L750-L791 |
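As a rough sketch (not taken from the original source), the [window] section that SaveState writes and LoadState reads back can be reproduced with the Python 2 ConfigParser module used above; the concrete values and the file name "runsnake.conf" are invented for illustration:

import ConfigParser  # Python 2 module name, as used by LoadState above

config = ConfigParser.RawConfigParser()
config.add_section('window')
config.set('window', 'maximized', 'False')
config.set('window', 'width', '1024')
config.set('window', 'height', '768')
config.set('window', 'x', '50')
config.set('window', 'y', '50')
with open('runsnake.conf', 'w') as handle:  # hypothetical file name
    config.write(handle)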
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | RunSnakeRunApp.OnInit | def OnInit(self, profile=None, memoryProfile=None):
"""Initialise the application"""
wx.Image.AddHandler(self.handler)
frame = MainFrame( config_parser = load_config())
frame.Show(True)
self.SetTopWindow(frame)
if profile:
wx.CallAfter(frame.load, *[profile])
elif sys.argv[1:]:
if sys.argv[1] == '-m':
if sys.argv[2:]:
wx.CallAfter( frame.load_memory, sys.argv[2] )
else:
log.warn( 'No memory file specified' )
else:
wx.CallAfter(frame.load, *sys.argv[1:])
return True | python | def OnInit(self, profile=None, memoryProfile=None):
"""Initialise the application"""
wx.Image.AddHandler(self.handler)
frame = MainFrame( config_parser = load_config())
frame.Show(True)
self.SetTopWindow(frame)
if profile:
wx.CallAfter(frame.load, *[profile])
elif sys.argv[1:]:
if sys.argv[1] == '-m':
if sys.argv[2:]:
wx.CallAfter( frame.load_memory, sys.argv[2] )
else:
log.warn( 'No memory file specified' )
else:
wx.CallAfter(frame.load, *sys.argv[1:])
return True | Initialise the application | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L808-L824 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py | MeliaeViewApp.OnInit | def OnInit(self):
"""Initialise the application"""
wx.Image.AddHandler(self.handler)
frame = MainFrame( config_parser = load_config())
frame.Show(True)
self.SetTopWindow(frame)
if sys.argv[1:]:
wx.CallAfter( frame.load_memory, sys.argv[1] )
else:
log.warn( 'No memory file specified' )
return True | python | def OnInit(self):
"""Initialise the application"""
wx.Image.AddHandler(self.handler)
frame = MainFrame( config_parser = load_config())
frame.Show(True)
self.SetTopWindow(frame)
if sys.argv[1:]:
wx.CallAfter( frame.load_memory, sys.argv[1] )
else:
log.warn( 'No memory file specified' )
return True | Initialise the application | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L828-L838 |
lrq3000/pyFileFixity | pyFileFixity/lib/distance/distance/_fastcomp.py | fast_comp | def fast_comp(seq1, seq2, transpositions=False):
"""Compute the distance between the two sequences `seq1` and `seq2` up to a
maximum of 2 included, and return it. If the edit distance between the two
sequences is higher than that, -1 is returned.
If `transpositions` is `True`, transpositions will be taken into account for
the computation of the distance. This can make a difference, e.g.:
>>> fast_comp("abc", "bac", transpositions=False)
2
>>> fast_comp("abc", "bac", transpositions=True)
1
This is faster than `levenshtein` by an order of magnitude, but on the
other hand is of limited use.
The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`.
I've added transpositions support to the original code.
"""
replace, insert, delete = "r", "i", "d"
L1, L2 = len(seq1), len(seq2)
if L1 < L2:
L1, L2 = L2, L1
seq1, seq2 = seq2, seq1
ldiff = L1 - L2
if ldiff == 0:
models = (insert+delete, delete+insert, replace+replace)
elif ldiff == 1:
models = (delete+replace, replace+delete)
elif ldiff == 2:
models = (delete+delete,)
else:
return -1
res = 3
for model in models:
i = j = c = 0
while (i < L1) and (j < L2):
if seq1[i] != seq2[j]:
c = c+1
if 2 < c:
break
if transpositions and ldiff != 2 \
and i < L1 - 1 and j < L2 - 1 \
and seq1[i+1] == seq2[j] and seq1[i] == seq2[j+1]:
i, j = i+2, j+2
else:
cmd = model[c-1]
if cmd == delete:
i = i+1
elif cmd == insert:
j = j+1
else:
assert cmd == replace
i,j = i+1, j+1
else:
i,j = i+1, j+1
if 2 < c:
continue
elif i < L1:
if L1-i <= model[c:].count(delete):
c = c + (L1-i)
else:
continue
elif j < L2:
if L2-j <= model[c:].count(insert):
c = c + (L2-j)
else:
continue
if c < res:
res = c
if res == 3:
res = -1
return res | python | def fast_comp(seq1, seq2, transpositions=False):
"""Compute the distance between the two sequences `seq1` and `seq2` up to a
maximum of 2 included, and return it. If the edit distance between the two
sequences is higher than that, -1 is returned.
If `transpositions` is `True`, transpositions will be taken into account for
the computation of the distance. This can make a difference, e.g.:
>>> fast_comp("abc", "bac", transpositions=False)
2
>>> fast_comp("abc", "bac", transpositions=True)
1
This is faster than `levenshtein` by an order of magnitude, but on the
other hand is of limited use.
The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`.
I've added transpositions support to the original code.
"""
replace, insert, delete = "r", "i", "d"
L1, L2 = len(seq1), len(seq2)
if L1 < L2:
L1, L2 = L2, L1
seq1, seq2 = seq2, seq1
ldiff = L1 - L2
if ldiff == 0:
models = (insert+delete, delete+insert, replace+replace)
elif ldiff == 1:
models = (delete+replace, replace+delete)
elif ldiff == 2:
models = (delete+delete,)
else:
return -1
res = 3
for model in models:
i = j = c = 0
while (i < L1) and (j < L2):
if seq1[i] != seq2[j]:
c = c+1
if 2 < c:
break
if transpositions and ldiff != 2 \
and i < L1 - 1 and j < L2 - 1 \
and seq1[i+1] == seq2[j] and seq1[i] == seq2[j+1]:
i, j = i+2, j+2
else:
cmd = model[c-1]
if cmd == delete:
i = i+1
elif cmd == insert:
j = j+1
else:
assert cmd == replace
i,j = i+1, j+1
else:
i,j = i+1, j+1
if 2 < c:
continue
elif i < L1:
if L1-i <= model[c:].count(delete):
c = c + (L1-i)
else:
continue
elif j < L2:
if L2-j <= model[c:].count(insert):
c = c + (L2-j)
else:
continue
if c < res:
res = c
if res == 3:
res = -1
return res | Compute the distance between the two sequences `seq1` and `seq2` up to a
maximum of 2 included, and return it. If the edit distance between the two
sequences is higher than that, -1 is returned.
If `transpositions` is `True`, transpositions will be taken into account for
the computation of the distance. This can make a difference, e.g.:
>>> fast_comp("abc", "bac", transpositions=False)
2
>>> fast_comp("abc", "bac", transpositions=True)
1
This is faster than `levenshtein` by an order of magnitude, but on the
other hand is of limited use.
The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`.
I've added transpositions support to the original code. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_fastcomp.py#L3-L82 |
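A small usage sketch (the word list is invented): because fast_comp caps the distance at 2 and returns -1 beyond that, it works well as a cheap pre-filter before an exact Levenshtein computation.

words = ["banana", "bananas", "bahama", "cabana", "orange"]
query = "banana"
# Keep only candidates within edit distance 2 of the query (-1 means "further away").
close = [w for w in words if fast_comp(query, w, transpositions=True) != -1]
print(close)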
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | is_file | def is_file(dirname):
'''Checks if a path is an actual file that exists'''
if not os.path.isfile(dirname):
msg = "{0} is not an existing file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | python | def is_file(dirname):
'''Checks if a path is an actual file that exists'''
if not os.path.isfile(dirname):
msg = "{0} is not an existing file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | Checks if a path is an actual file that exists | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L19-L25 |
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | is_dir | def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | python | def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | Checks if a path is an actual directory that exists | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L27-L33 |
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | is_dir_or_file | def is_dir_or_file(dirname):
'''Checks if a path is an actual directory that exists or a file'''
if not os.path.isdir(dirname) and not os.path.isfile(dirname):
msg = "{0} is not a directory nor a file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | python | def is_dir_or_file(dirname):
'''Checks if a path is an actual directory that exists or a file'''
if not os.path.isdir(dirname) and not os.path.isfile(dirname):
msg = "{0} is not a directory nor a file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | Checks if a path is an actual directory that exists or a file | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L35-L41 |
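A hedged sketch of how these validators are typically plugged into argparse as type= callbacks; the option names below are illustrative and not taken from the project's actual parsers.

import argparse
# Import path follows the repository layout shown above.
from pyFileFixity.lib.aux_funcs import is_file, is_dir_or_file

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=is_dir_or_file,
                    help='file or folder to process')
parser.add_argument('-d', '--database', type=is_file,
                    help='existing ecc file to read')
# Invalid paths raise argparse.ArgumentTypeError, which argparse reports
# as a normal command-line error.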
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | fullpath | def fullpath(relpath):
'''Relative path to absolute'''
if (type(relpath) is object or type(relpath) is file):
relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath)) | python | def fullpath(relpath):
'''Relative path to absolute'''
if (type(relpath) is object or type(relpath) is file):
relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath)) | Relative path to absolute | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L43-L47 |
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | recwalk | def recwalk(inputpath, sorting=True):
    '''Recursively walk through a folder. This provides a means to flatten out the file listing (necessary to show a progress bar). This is a generator.'''
# If it's only a single file, return this single file
if os.path.isfile(inputpath):
abs_path = fullpath(inputpath)
yield os.path.dirname(abs_path), os.path.basename(abs_path)
# Else if it's a folder, walk recursively and return every files
else:
for dirpath, dirs, files in walk(inputpath):
if sorting:
files.sort()
dirs.sort() # sort directories in-place for ordered recursive walking
for filename in files:
yield (dirpath, filename) | python | def recwalk(inputpath, sorting=True):
    '''Recursively walk through a folder. This provides a means to flatten out the file listing (necessary to show a progress bar). This is a generator.'''
# If it's only a single file, return this single file
if os.path.isfile(inputpath):
abs_path = fullpath(inputpath)
yield os.path.dirname(abs_path), os.path.basename(abs_path)
# Else if it's a folder, walk recursively and return every files
else:
for dirpath, dirs, files in walk(inputpath):
if sorting:
files.sort()
dirs.sort() # sort directories in-place for ordered recursive walking
for filename in files:
                yield (dirpath, filename) | Recursively walk through a folder. This provides a means to flatten out the file listing (necessary to show a progress bar). This is a generator. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L49-L62
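An illustrative traversal using the generator above; '.' is just a placeholder path.

import os

total_files = 0
total_bytes = 0
for dirpath, filename in recwalk('.'):
    total_files += 1
    total_bytes += os.path.getsize(os.path.join(dirpath, filename))
print("%i files, %i bytes" % (total_files, total_bytes))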
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | path2unix | def path2unix(path, nojoin=False, fromwinpath=False):
'''From a path given in any format, converts to posix path format
fromwinpath=True forces the input path to be recognized as a Windows path (useful on Unix machines to unit test Windows paths)'''
if fromwinpath:
pathparts = list(PureWindowsPath(path).parts)
else:
pathparts = list(PurePath(path).parts)
if nojoin:
return pathparts
else:
return posixpath.join(*pathparts) | python | def path2unix(path, nojoin=False, fromwinpath=False):
'''From a path given in any format, converts to posix path format
fromwinpath=True forces the input path to be recognized as a Windows path (useful on Unix machines to unit test Windows paths)'''
if fromwinpath:
pathparts = list(PureWindowsPath(path).parts)
else:
pathparts = list(PurePath(path).parts)
if nojoin:
return pathparts
else:
return posixpath.join(*pathparts) | From a path given in any format, converts to posix path format
fromwinpath=True forces the input path to be recognized as a Windows path (useful on Unix machines to unit test Windows paths) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L72-L82 |
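Illustrative conversions; fromwinpath=True forces Windows-style parsing so the example behaves the same on any platform.

print(path2unix(r'folder\sub\file.txt', fromwinpath=True))
# -> folder/sub/file.txt
print(path2unix(r'folder\sub\file.txt', nojoin=True, fromwinpath=True))
# -> ['folder', 'sub', 'file.txt']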
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | get_next_entry | def get_next_entry(file, entrymarker="\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF", only_coord=True, blocksize=65535):
'''Find or read the next ecc entry in a given ecc file.
Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle).
This will read any string length between two entrymarkers.
The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok).
`file` is a file handle, not the path to the file.'''
found = False
start = None # start and end vars are the relative position of the starting/ending entrymarkers in the current buffer
end = None
startcursor = None # startcursor and endcursor are the absolute position of the starting/ending entrymarkers inside the database file
endcursor = None
buf = 1
# Sanity check: cannot screen the file's content if the window is of the same size as the pattern to match (the marker)
if blocksize <= len(entrymarker): blocksize = len(entrymarker) + 1
# Continue the search as long as we did not find at least one starting marker and one ending marker (or end of file)
while (not found and buf):
# Read a long block at once, we will readjust the file cursor after
buf = file.read(blocksize)
# Find the start marker (if not found already)
if start is None or start == -1:
start = buf.find(entrymarker); # relative position of the starting marker in the currently read string
            if start >= 0 and not startcursor: # assign startcursor only if it's empty (meaning that we did not find the starting entrymarker yet; if it was already found, we are now only looking for the ending marker)
startcursor = file.tell() - len(buf) + start # absolute position of the starting marker in the file
if start >= 0: start = start + len(entrymarker)
# If we have a starting marker, we try to find a subsequent marker which will be the ending of our entry (if the entry is corrupted we don't care: it won't pass the entry_to_dict() decoding or subsequent steps of decoding and we will just pass to the next ecc entry). This allows to process any valid entry, no matter if previous ones were scrambled.
if startcursor is not None and startcursor >= 0:
end = buf.find(entrymarker, start)
if end < 0 and len(buf) < blocksize: # Special case: we didn't find any ending marker but we reached the end of file, then we are probably in fact just reading the last entry (thus there's no ending marker for this entry)
end = len(buf) # It's ok, we have our entry, the ending marker is just the end of file
# If we found an ending marker (or if end of file is reached), then we compute the absolute cursor value and put the file reading cursor back in position, just before the next entry (where the ending marker is if any)
if end >= 0:
endcursor = file.tell() - len(buf) + end
# Make sure we are not redetecting the same marker as the start marker
if endcursor > startcursor:
file.seek(endcursor)
found = True
else:
end = -1
                    endcursor = None
#print("Start:", start, startcursor)
#print("End: ", end, endcursor)
# Stop criterion to avoid infinite loop: in the case we could not find any entry in the rest of the file and we reached the EOF, we just quit now
if len(buf) < blocksize: break
# Did not find the full entry in one buffer? Reinit variables for next iteration, but keep in memory startcursor.
if start > 0: start = 0 # reset the start position for the end buf find at next iteration (ie: in the arithmetic operations to compute the absolute endcursor position, the start entrymarker won't be accounted because it was discovered in a previous buffer).
if not endcursor: file.seek(file.tell()-len(entrymarker)) # Try to fix edge case where blocksize stops the buffer exactly in the middle of the ending entrymarker. The starting marker should always be ok because it should be quite close (or generally immediately after) the previous entry, but the end depends on the end of the current entry (size of the original file), thus the buffer may miss the ending entrymarker. should offset file.seek(-len(entrymarker)) before searching for ending.
if found: # if an entry was found, we seek to the beginning of the entry and then either read the entry from file or just return the markers positions (aka the entry bounds)
file.seek(startcursor + len(entrymarker))
if only_coord:
# Return only coordinates of the start and end markers
# Note: it is useful to just return the reading positions and not the entry itself because it can get quite huge and may overflow memory, thus we will read each ecc blocks on request using a generator.
return [startcursor + len(entrymarker), endcursor]
else:
# Return the full entry's content
return file.read(endcursor - startcursor - len(entrymarker))
else:
# Nothing found (or no new entry to find, we've already found them all), so we return None
return None | python | def get_next_entry(file, entrymarker="\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF", only_coord=True, blocksize=65535):
'''Find or read the next ecc entry in a given ecc file.
Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle).
This will read any string length between two entrymarkers.
The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok).
`file` is a file handle, not the path to the file.'''
found = False
start = None # start and end vars are the relative position of the starting/ending entrymarkers in the current buffer
end = None
startcursor = None # startcursor and endcursor are the absolute position of the starting/ending entrymarkers inside the database file
endcursor = None
buf = 1
# Sanity check: cannot screen the file's content if the window is of the same size as the pattern to match (the marker)
if blocksize <= len(entrymarker): blocksize = len(entrymarker) + 1
# Continue the search as long as we did not find at least one starting marker and one ending marker (or end of file)
while (not found and buf):
# Read a long block at once, we will readjust the file cursor after
buf = file.read(blocksize)
# Find the start marker (if not found already)
if start is None or start == -1:
start = buf.find(entrymarker); # relative position of the starting marker in the currently read string
            if start >= 0 and not startcursor: # assign startcursor only if it's empty (meaning that we did not find the starting entrymarker yet; if it was already found, we are now only looking for the ending marker)
startcursor = file.tell() - len(buf) + start # absolute position of the starting marker in the file
if start >= 0: start = start + len(entrymarker)
# If we have a starting marker, we try to find a subsequent marker which will be the ending of our entry (if the entry is corrupted we don't care: it won't pass the entry_to_dict() decoding or subsequent steps of decoding and we will just pass to the next ecc entry). This allows to process any valid entry, no matter if previous ones were scrambled.
if startcursor is not None and startcursor >= 0:
end = buf.find(entrymarker, start)
if end < 0 and len(buf) < blocksize: # Special case: we didn't find any ending marker but we reached the end of file, then we are probably in fact just reading the last entry (thus there's no ending marker for this entry)
end = len(buf) # It's ok, we have our entry, the ending marker is just the end of file
# If we found an ending marker (or if end of file is reached), then we compute the absolute cursor value and put the file reading cursor back in position, just before the next entry (where the ending marker is if any)
if end >= 0:
endcursor = file.tell() - len(buf) + end
# Make sure we are not redetecting the same marker as the start marker
if endcursor > startcursor:
file.seek(endcursor)
found = True
else:
end = -1
                    endcursor = None
#print("Start:", start, startcursor)
#print("End: ", end, endcursor)
# Stop criterion to avoid infinite loop: in the case we could not find any entry in the rest of the file and we reached the EOF, we just quit now
if len(buf) < blocksize: break
# Did not find the full entry in one buffer? Reinit variables for next iteration, but keep in memory startcursor.
if start > 0: start = 0 # reset the start position for the end buf find at next iteration (ie: in the arithmetic operations to compute the absolute endcursor position, the start entrymarker won't be accounted because it was discovered in a previous buffer).
if not endcursor: file.seek(file.tell()-len(entrymarker)) # Try to fix edge case where blocksize stops the buffer exactly in the middle of the ending entrymarker. The starting marker should always be ok because it should be quite close (or generally immediately after) the previous entry, but the end depends on the end of the current entry (size of the original file), thus the buffer may miss the ending entrymarker. should offset file.seek(-len(entrymarker)) before searching for ending.
if found: # if an entry was found, we seek to the beginning of the entry and then either read the entry from file or just return the markers positions (aka the entry bounds)
file.seek(startcursor + len(entrymarker))
if only_coord:
# Return only coordinates of the start and end markers
# Note: it is useful to just return the reading positions and not the entry itself because it can get quite huge and may overflow memory, thus we will read each ecc blocks on request using a generator.
return [startcursor + len(entrymarker), endcursor]
else:
# Return the full entry's content
return file.read(endcursor - startcursor - len(entrymarker))
else:
# Nothing found (or no new entry to find, we've already found them all), so we return None
return None | Find or read the next ecc entry in a given ecc file.
Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle).
This will read any string length between two entrymarkers.
The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok).
`file` is a file handle, not the path to the file. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L84-L142 |
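A hedged sketch of walking all entries of a small in-memory "ecc file"; StringIO stands in for a real file handle and the marker is shortened purely for readability.

from StringIO import StringIO  # Python 2, matching the module above

marker = "\xFE\xFF"
handle = StringIO(marker + "entry one" + marker + "entry two")
while True:
    entry = get_next_entry(handle, entrymarker=marker, only_coord=False)
    if entry is None:
        break
    print(entry)  # "entry one", then "entry two"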
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | remove_if_exist | def remove_if_exist(path): # pragma: no cover
"""Delete a file or a directory recursively if it exists, else no exception is raised"""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
return True
elif os.path.isfile(path):
os.remove(path)
return True
return False | python | def remove_if_exist(path): # pragma: no cover
"""Delete a file or a directory recursively if it exists, else no exception is raised"""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
return True
elif os.path.isfile(path):
os.remove(path)
return True
return False | Delete a file or a directory recursively if it exists, else no exception is raised | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L149-L158 |
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | copy_any | def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
srcfile = os.path.join(dirpath, filepath)
relpath = os.path.relpath(srcfile, src)
dstfile = os.path.join(dst, relpath)
if not os.path.exists(dstfile):
create_dir_if_not_exist(os.path.dirname(dstfile))
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
return True
elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return True
return False | python | def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
srcfile = os.path.join(dirpath, filepath)
relpath = os.path.relpath(srcfile, src)
dstfile = os.path.join(dst, relpath)
if not os.path.exists(dstfile):
create_dir_if_not_exist(os.path.dirname(dstfile))
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
return True
elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return True
return False | Copy a file or a directory tree, deleting the destination before processing | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L160-L182 |
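An illustrative round trip inside a temporary directory; all paths are placeholders.

import os, tempfile

tmp = tempfile.mkdtemp()
src = os.path.join(tmp, 'src')
os.makedirs(src)
with open(os.path.join(src, 'a.txt'), 'w') as f:
    f.write('hello')
dst = os.path.join(tmp, 'dst')
copy_any(src, dst)                     # full copy, wiping dst first
copy_any(src, dst, only_missing=True)  # second pass finds nothing missing
print(os.listdir(dst))                 # ['a.txt']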
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | group_files_by_size | def group_files_by_size(fileslist, multi): # pragma: no cover
    ''' Cluster files into the specified number of groups, where each group's total size is as close as possible to the others.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C)
'''
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
fgrouped = {}
i = 0
for x in flord.keys():
i += 1
fgrouped[i] = [[x]]
return fgrouped
fgrouped = {}
i = 0
while flord:
i += 1
fgrouped[i] = []
big_key, big_value = flord.popitem(0)
fgrouped[i].append([big_key])
for j in xrange(multi-1):
cluster = []
if not flord: break
child_key, child_value = flord.popitem(0)
cluster.append(child_key)
if child_value == big_value:
fgrouped[i].append(cluster)
continue
else:
diff = big_value - child_value
for key, value in flord.iteritems():
if value <= diff:
cluster.append(key)
del flord[key]
if value == diff:
break
else:
child_value += value
diff = big_value - child_value
fgrouped[i].append(cluster)
return fgrouped | python | def group_files_by_size(fileslist, multi): # pragma: no cover
    ''' Cluster files into the specified number of groups, where each group's total size is as close as possible to the others.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C)
'''
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
fgrouped = {}
i = 0
for x in flord.keys():
i += 1
fgrouped[i] = [[x]]
return fgrouped
fgrouped = {}
i = 0
while flord:
i += 1
fgrouped[i] = []
big_key, big_value = flord.popitem(0)
fgrouped[i].append([big_key])
for j in xrange(multi-1):
cluster = []
if not flord: break
child_key, child_value = flord.popitem(0)
cluster.append(child_key)
if child_value == big_value:
fgrouped[i].append(cluster)
continue
else:
diff = big_value - child_value
for key, value in flord.iteritems():
if value <= diff:
cluster.append(key)
del flord[key]
if value == diff:
break
else:
child_value += value
diff = big_value - child_value
fgrouped[i].append(cluster)
    return fgrouped | Cluster files into the specified number of groups, where each group's total size is as close as possible to the others.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L204-L260 |
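A small illustrative run (file names and sizes are invented): cluster five files into groups of two whose total sizes should come out roughly balanced.

files = {'a.bin': 500, 'b.bin': 300, 'c.bin': 200, 'd.bin': 100, 'e.bin': 100}
print(group_files_by_size(files, 2))
# roughly: {1: [['a.bin'], ['b.bin', 'c.bin']], 2: [['d.bin'], ['e.bin']]}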
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | group_files_by_size_fast | def group_files_by_size_fast(fileslist, nbgroups, mode=1): # pragma: no cover
'''Given a files list with sizes, output a list where the files are grouped in nbgroups per cluster.
    Pseudo-code for algorithm in O(n log(g)) (thanks to insertion sort or binary search trees)
See for more infos: http://cs.stackexchange.com/questions/44406/fast-algorithm-for-clustering-groups-of-elements-given-their-size-time/44614#44614
For each file:
- If to-fill list is empty or file.size > first-key(to-fill):
* Create cluster c with file in first group g1
* Add to-fill[file.size].append([c, g2], [c, g3], ..., [c, gn])
- Else:
* ksize = first-key(to-fill)
* c, g = to-fill[ksize].popitem(0)
* Add file to cluster c in group g
* nsize = ksize - file.size
* if nsize > 0:
. to-fill[nsize].append([c, g])
. sort to-fill if not an automatic ordering structure
'''
ftofill = SortedList()
ftofill_pointer = {}
fgrouped = [] # [] or {}
ford = sorted(fileslist.iteritems(), key=lambda x: x[1])
last_cid = -1
while ford:
fname, fsize = ford.pop()
#print "----\n"+fname, fsize
#if ftofill: print "beforebranch", fsize, ftofill[-1]
#print ftofill
if not ftofill or fsize > ftofill[-1]:
last_cid += 1
#print "Branch A: create cluster %i" % last_cid
fgrouped.append([])
#fgrouped[last_cid] = []
fgrouped[last_cid].append([fname])
if mode==0:
for g in xrange(nbgroups-1, 0, -1):
fgrouped[last_cid].append([])
if not fsize in ftofill_pointer:
ftofill_pointer[fsize] = []
ftofill_pointer[fsize].append((last_cid, g))
ftofill.add(fsize)
else:
for g in xrange(1, nbgroups):
try:
fgname, fgsize = ford.pop()
#print "Added to group %i: %s %i" % (g, fgname, fgsize)
except IndexError:
break
fgrouped[last_cid].append([fgname])
diff_size = fsize - fgsize
if diff_size > 0:
if not diff_size in ftofill_pointer:
ftofill_pointer[diff_size] = []
ftofill_pointer[diff_size].append((last_cid, g))
ftofill.add(diff_size)
else:
#print "Branch B"
ksize = ftofill.pop()
c, g = ftofill_pointer[ksize].pop()
#print "Assign to cluster %i group %i" % (c, g)
fgrouped[c][g].append(fname)
nsize = ksize - fsize
if nsize > 0:
if not nsize in ftofill_pointer:
ftofill_pointer[nsize] = []
ftofill_pointer[nsize].append((c, g))
ftofill.add(nsize)
return fgrouped | python | def group_files_by_size_fast(fileslist, nbgroups, mode=1): # pragma: no cover
'''Given a files list with sizes, output a list where the files are grouped in nbgroups per cluster.
    Pseudo-code for algorithm in O(n log(g)) (thanks to insertion sort or binary search trees)
See for more infos: http://cs.stackexchange.com/questions/44406/fast-algorithm-for-clustering-groups-of-elements-given-their-size-time/44614#44614
For each file:
- If to-fill list is empty or file.size > first-key(to-fill):
* Create cluster c with file in first group g1
* Add to-fill[file.size].append([c, g2], [c, g3], ..., [c, gn])
- Else:
* ksize = first-key(to-fill)
* c, g = to-fill[ksize].popitem(0)
* Add file to cluster c in group g
* nsize = ksize - file.size
* if nsize > 0:
. to-fill[nsize].append([c, g])
. sort to-fill if not an automatic ordering structure
'''
ftofill = SortedList()
ftofill_pointer = {}
fgrouped = [] # [] or {}
ford = sorted(fileslist.iteritems(), key=lambda x: x[1])
last_cid = -1
while ford:
fname, fsize = ford.pop()
#print "----\n"+fname, fsize
#if ftofill: print "beforebranch", fsize, ftofill[-1]
#print ftofill
if not ftofill or fsize > ftofill[-1]:
last_cid += 1
#print "Branch A: create cluster %i" % last_cid
fgrouped.append([])
#fgrouped[last_cid] = []
fgrouped[last_cid].append([fname])
if mode==0:
for g in xrange(nbgroups-1, 0, -1):
fgrouped[last_cid].append([])
if not fsize in ftofill_pointer:
ftofill_pointer[fsize] = []
ftofill_pointer[fsize].append((last_cid, g))
ftofill.add(fsize)
else:
for g in xrange(1, nbgroups):
try:
fgname, fgsize = ford.pop()
#print "Added to group %i: %s %i" % (g, fgname, fgsize)
except IndexError:
break
fgrouped[last_cid].append([fgname])
diff_size = fsize - fgsize
if diff_size > 0:
if not diff_size in ftofill_pointer:
ftofill_pointer[diff_size] = []
ftofill_pointer[diff_size].append((last_cid, g))
ftofill.add(diff_size)
else:
#print "Branch B"
ksize = ftofill.pop()
c, g = ftofill_pointer[ksize].pop()
#print "Assign to cluster %i group %i" % (c, g)
fgrouped[c][g].append(fname)
nsize = ksize - fsize
if nsize > 0:
if not nsize in ftofill_pointer:
ftofill_pointer[nsize] = []
ftofill_pointer[nsize].append((c, g))
ftofill.add(nsize)
return fgrouped | Given a files list with sizes, output a list where the files are grouped in nbgroups per cluster.
    Pseudo-code for algorithm in O(n log(g)) (thanks to insertion sort or binary search trees)
See for more infos: http://cs.stackexchange.com/questions/44406/fast-algorithm-for-clustering-groups-of-elements-given-their-size-time/44614#44614
For each file:
- If to-fill list is empty or file.size > first-key(to-fill):
* Create cluster c with file in first group g1
* Add to-fill[file.size].append([c, g2], [c, g3], ..., [c, gn])
- Else:
* ksize = first-key(to-fill)
* c, g = to-fill[ksize].popitem(0)
* Add file to cluster c in group g
* nsize = ksize - file.size
* if nsize > 0:
. to-fill[nsize].append([c, g])
. sort to-fill if not an automatic ordering structure | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L262-L329 |
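The same idea with the faster strategy; this assumes the module's SortedList dependency (sortedcontainers) is installed, and the file names and sizes are again invented.

files = {'a': 900, 'b': 500, 'c': 400, 'd': 300, 'e': 200, 'f': 100}
clusters = group_files_by_size_fast(files, 3)
print(clusters)   # e.g. [[['a'], ['b', 'e', 'f'], ['c', 'd']]]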
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | group_files_by_size_simple | def group_files_by_size_simple(fileslist, nbgroups): # pragma: no cover
""" Simple and fast files grouping strategy: just order by size, and group files n-by-n, so that files with the closest sizes are grouped together.
In this strategy, there is only one file per subgroup, and thus there will often be remaining space left because there is no filling strategy here, but it's very fast. """
ford = sorted(fileslist.iteritems(), key=lambda x: x[1], reverse=True)
ford = [[x[0]] for x in ford]
return [group for group in grouper(nbgroups, ford)] | python | def group_files_by_size_simple(fileslist, nbgroups): # pragma: no cover
""" Simple and fast files grouping strategy: just order by size, and group files n-by-n, so that files with the closest sizes are grouped together.
In this strategy, there is only one file per subgroup, and thus there will often be remaining space left because there is no filling strategy here, but it's very fast. """
ford = sorted(fileslist.iteritems(), key=lambda x: x[1], reverse=True)
ford = [[x[0]] for x in ford]
return [group for group in grouper(nbgroups, ford)] | Simple and fast files grouping strategy: just order by size, and group files n-by-n, so that files with the closest sizes are grouped together.
In this strategy, there is only one file per subgroup, and thus there will often be remaining space left because there is no filling strategy here, but it's very fast. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L331-L336 |
lrq3000/pyFileFixity | pyFileFixity/lib/aux_funcs.py | grouped_count_sizes | def grouped_count_sizes(fileslist, fgrouped): # pragma: no cover
'''Compute the total size per group and total number of files. Useful to check that everything is OK.'''
fsizes = {}
total_files = 0
allitems = None
if isinstance(fgrouped, dict):
allitems = fgrouped.iteritems()
elif isinstance(fgrouped, list):
allitems = enumerate(fgrouped)
for fkey, cluster in allitems:
fsizes[fkey] = []
for subcluster in cluster:
tot = 0
if subcluster is not None:
for fname in subcluster:
tot += fileslist[fname]
total_files += 1
fsizes[fkey].append(tot)
return fsizes, total_files | python | def grouped_count_sizes(fileslist, fgrouped): # pragma: no cover
'''Compute the total size per group and total number of files. Useful to check that everything is OK.'''
fsizes = {}
total_files = 0
allitems = None
if isinstance(fgrouped, dict):
allitems = fgrouped.iteritems()
elif isinstance(fgrouped, list):
allitems = enumerate(fgrouped)
for fkey, cluster in allitems:
fsizes[fkey] = []
for subcluster in cluster:
tot = 0
if subcluster is not None:
for fname in subcluster:
tot += fileslist[fname]
total_files += 1
fsizes[fkey].append(tot)
return fsizes, total_files | Compute the total size per group and total number of files. Useful to check that everything is OK. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L338-L356 |
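An illustrative sanity check (same invented file list as above): group the files, then verify with grouped_count_sizes() that no file was lost and inspect the per-group totals.

files = {'a': 900, 'b': 500, 'c': 400, 'd': 300, 'e': 200, 'f': 100}
grouped = group_files_by_size_fast(files, 3)
sizes, total = grouped_count_sizes(files, grouped)
assert total == len(files)   # no file lost in the grouping
print(sizes)                 # per-group totals, e.g. {0: [900, 800, 700]}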
lrq3000/pyFileFixity | pyFileFixity/lib/gooey/gui/windows/advanced_config.py | ConfigPanel.GetOptions | def GetOptions(self):
"""
returns the collective values from all of the
widgets contained in the panel"""
values = [c.GetValue()
for c in chain(*self.widgets)
if c.GetValue() is not None]
return ' '.join(values) | python | def GetOptions(self):
"""
returns the collective values from all of the
widgets contained in the panel"""
values = [c.GetValue()
for c in chain(*self.widgets)
if c.GetValue() is not None]
return ' '.join(values) | returns the collective values from all of the
widgets contained in the panel | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/windows/advanced_config.py#L92-L99 |