rem (string, lengths 0-322k) | add (string, lengths 0-2.05M) | context (string, lengths 8-228k)
---|---|---|
fp = RepoSide.rf_cache.get_fp(base_index + repo_rorp.index)
|
fp = RepoSide.rf_cache.get_fp(base_index + repo_rorp.index, repo_rorp)
|
def Verify(mirror_rp, inc_rp, verify_time): """Compute SHA1 sums of repository files and check against metadata""" assert mirror_rp.conn is Globals.local_connection repo_iter = RepoSide.init_and_get_iter(mirror_rp, inc_rp, verify_time) base_index = RepoSide.mirror_base.index bad_files = 0 for repo_rorp in repo_iter: if not repo_rorp.isreg(): continue if not repo_rorp.has_sha1(): log.Log("Warning: Cannot find SHA1 digest for file %s,\n" "perhaps because this feature was added in v1.1.1" % (repo_rorp.get_indexpath(),), 2) continue fp = RepoSide.rf_cache.get_fp(base_index + repo_rorp.index) computed_hash = hash.compute_sha1_fp(fp) if computed_hash == repo_rorp.get_sha1(): log.Log("Verified SHA1 digest of " + repo_rorp.get_indexpath(), 5) else: bad_files += 1 log.Log("Warning: Computed SHA1 digest of %s\n %s\n" "doesn't match recorded digest of\n %s\n" "Your backup repository may be corrupted!" % (repo_rorp.get_indexpath(), computed_hash, repo_rorp.get_sha1()), 2) RepoSide.close_rf_cache() if not bad_files: log.Log("Every file verified successfully.", 3) return bad_files
|
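A minimal sketch of the chunked digest that hash.compute_sha1_fp in the Verify context above presumably performs; hashlib and the 64 KB block size are assumptions, not details taken from the rdiff-backup source:

import hashlib

def compute_sha1_fp(fp, blocksize=64 * 1024):
    """Hash a file object in fixed-size blocks; return the hex digest."""
    digest = hashlib.sha1()
    while True:
        block = fp.read(blocksize)
        if not block:
            break
        digest.update(block)
    return digest.hexdigest()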
mir_rorp.setfile(cls.rf_cache.get_fp(base_index + index))
|
fp = cls.rf_cache.get_fp(base_index + index, mir_rorp) mir_rorp.setfile(fp)
|
def attach_files(cls, src_iter, mirror_rp, inc_rp, compare_time): """Attach data to all the files that need checking
|
"unable to compare." % (index,), 2)
|
"unable to compare." % (mir_rorp.get_indexpath(),), 2)
|
def hashes_changed(src_rp, mir_rorp): """Return 0 if their data hashes same, 1 otherwise""" if not mir_rorp.has_sha1(): log.Log("Warning: Metadata file has no digest for %s, " "unable to compare." % (index,), 2) return 0 elif (src_rp.getsize() == mir_rorp.getsize() and hash.compute_sha1(src_rp) == mir_rorp.get_sha1()): return 0 return 1
|
avoid this message by removing the rdiff_backup_data directory;
|
avoid this message by removing the rdiff-backup-data directory;
|
def checkdest_need_check(dest_rp): """Return None if no dest dir found, 1 if dest dir needs check, 0 o/w""" if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None for filename in Globals.rbdir.listdir(): if filename not in ['chars_to_quote', 'backup.log']: break else: # This may happen the first backup just after we test for quoting return None curmirroot = Globals.rbdir.append("current_mirror") curmir_incs = restore.get_inclist(curmirroot) if not curmir_incs: Log.FatalError(
|
the rdiff_backup_data directory because there is no important
|
the rdiff-backup-data directory because there is no important
|
def checkdest_need_check(dest_rp): """Return None if no dest dir found, 1 if dest dir needs check, 0 o/w""" if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None for filename in Globals.rbdir.listdir(): if filename not in ['chars_to_quote', 'backup.log']: break else: # This may happen the first backup just after we test for quoting return None curmirroot = Globals.rbdir.append("current_mirror") curmir_incs = restore.get_inclist(curmirroot) if not curmir_incs: Log.FatalError(
|
fsobj = FSSpec(self.path)
|
fsobj = FSSpec(rpath.path)
|
def carbonfile_get(rpath): """Return carbonfile value for local rpath""" from Carbon.File import FSSpec import MacOS try: fsobj = FSSpec(self.path) finderinfo = fsobj.FSpGetFInfo() cfile = {'creator': finderinfo.Creator, 'type': finderinfo.Type, 'location': finderinfo.Location, 'flags': finderinfo.Flags} return cfile except MacOS.Error: log.Log("Cannot read carbonfile information from %s" % (self.path,), 2) return None
|
(self.path,), 2)
|
(rpath.path,), 2)
|
def carbonfile_get(rpath): """Return carbonfile value for local rpath""" from Carbon.File import FSSpec import MacOS try: fsobj = FSSpec(self.path) finderinfo = fsobj.FSpGetFInfo() cfile = {'creator': finderinfo.Creator, 'type': finderinfo.Type, 'location': finderinfo.Location, 'flags': finderinfo.Flags} return cfile except MacOS.Error: log.Log("Cannot read carbonfile information from %s" % (self.path,), 2) return None
|
So here we assume all rdiff-backup events were recorded in "increments" increments, and if it's in-between we pick the older one here. """ global _rest_time base_incs = get_inclist(Globals.rbdir.append("increments")) if not base_incs: return _mirror_time inctimes = [inc.getinctime() for inc in base_incs] inctimes.append(_mirror_time)
|
So if restore_to_time is inbetween two increments, return the older one. """ inctimes = cls.get_increment_times()
|
def get_rest_time(cls, restore_to_time): """Return older time, if restore_to_time is in between two inc times
|
elif action == 'test-server': pass
|
elif action == 'test-server' or action == 'calculate-average': pass
|
def check_action(): """Check to make sure action is compatible with args""" global action arg_action_dict = {0: ['server'], 1: ['list-increments', 'list-increment-sizes', 'remove-older-than', 'list-at-time', 'list-changed-since', 'check-destination-dir'], 2: ['backup', 'restore', 'restore-as-of', 'compare']} l = len(args) if l == 0 and action not in arg_action_dict[l]: commandline_error("No arguments given") elif not action: if l == 2: pass # Will determine restore or backup later else: commandline_error("Switches missing or wrong number of arguments") elif action == 'test-server': pass # test-server takes any number of args elif l > 2 or action not in arg_action_dict[l]: commandline_error("Wrong number of arguments given.")
|
else: raise ParsingError("Unknown field in line '%s'" % line)
|
else: raise ParsingError("Unknown field in line '%s %s'" % (field, data))
|
def Record2RORP(record_string): """Given record_string, return RORPath For speed reasons, write the RORPath data dictionary directly instead of calling rorpath functions. Profiling has shown this to be a time critical function. """ data_dict = {} for field, data in line_parsing_regexp.findall(record_string): if field == "File": if data == ".": index = () else: index = tuple(unquote_path(data).split("/")) elif field == "Type": if data == "None": data_dict['type'] = None else: data_dict['type'] = data elif field == "Size": data_dict['size'] = long(data) elif field == "NumHardLinks": data_dict['nlink'] = int(data) elif field == "Inode": data_dict['inode'] = long(data) elif field == "DeviceLoc": data_dict['devloc'] = long(data) elif field == "SymData": data_dict['linkname'] = unquote_path(data) elif field == "DeviceNum": devchar, major_str, minor_str = data.split(" ") data_dict['devnums'] = (devchar, int(major_str), int(minor_str)) elif field == "ModTime": data_dict['mtime'] = long(data) elif field == "Uid": data_dict['uid'] = int(data) elif field == "Gid": data_dict['gid'] = int(data) elif field == "Permissions": data_dict['perms'] = int(data) else: raise ParsingError("Unknown field in line '%s'" % line) return rpath.RORPath(index, data_dict)
|
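The Record2RORP context above dispatches on (field, data) pairs produced by line_parsing_regexp. A self-contained sketch of that parse-and-dispatch shape; the regexp and the reduced field set here are illustrative assumptions, not the project's definitions:

import re

# Assumed record format for illustration: one "Field data" pair per line.
line_parsing_regexp = re.compile(r"^(\w+) (.+)$", re.M)

def parse_record(record_string):
    data_dict = {}
    for field, data in line_parsing_regexp.findall(record_string):
        if field == "Size":
            data_dict['size'] = int(data)
        elif field == "ModTime":
            data_dict['mtime'] = int(data)
        else:
            data_dict[field.lower()] = data
    return data_dict

# parse_record("Size 42\nModTime 100") == {'size': 42, 'mtime': 100}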
try: id = int(qualifier)
|
try: uid = int(qualifier)
|
def text_to_entrytuple(self, text): """Return entrytuple given text like 'user:foo:r--'""" typetext, qualifier, permtext = text.split(':') if qualifier: try: id = int(qualifier) except ValueError: namepair = (None, qualifier) else: namepair = (uid, None)
|
Log.FatalError("Increment file %s does not exist" % src_path)
|
Log.FatalError("Increment file %s does not exist" % rpin.path)
|
def restore_check_paths(self, rpin, rpout): """Check paths and return pair of corresponding rps""" if not rpin.lstat(): Log.FatalError("Increment file %s does not exist" % src_path) if not rpin.isincfile(): Log.FatalError("""File %s does not look like an increment file.
|
"foobar.2001-09-01T04:49:04-07:00.diff").""")
|
"foobar.2001-09-01T04:49:04-07:00.diff").""" % rpin.path)
|
def restore_check_paths(self, rpin, rpout): """Check paths and return pair of corresponding rps""" if not rpin.lstat(): Log.FatalError("Increment file %s does not exist" % src_path) if not rpin.isincfile(): Log.FatalError("""File %s does not look like an increment file.
|
else: raise ParsingError("Unknown field in line '%s'" % line)
|
else: raise ParsingError("Unknown field in line '%s %s'" % (field, data))
|
def Record2RORP(record_string): """Given record_string, return RORPath For speed reasons, write the RORPath data dictionary directly instead of calling rorpath functions. Profiling has shown this to be a time critical function. """ data_dict = {} for field, data in line_parsing_regexp.findall(record_string): if field == "File": index = quoted_filename_to_index(data) elif field == "Type": if data == "None": data_dict['type'] = None else: data_dict['type'] = data elif field == "Size": data_dict['size'] = long(data) elif field == "ResourceFork": if data == "None": data_dict['resourcefork'] = "" else: data_dict['resourcefork'] = binascii.unhexlify(data) elif field == "CarbonFile": if data == "None": data_dict['carbonfile'] = None else: data_dict['carbonfile'] = string2carbonfile(data) elif field == "NumHardLinks": data_dict['nlink'] = int(data) elif field == "Inode": data_dict['inode'] = long(data) elif field == "DeviceLoc": data_dict['devloc'] = long(data) elif field == "SymData": data_dict['linkname'] = unquote_path(data) elif field == "DeviceNum": devchar, major_str, minor_str = data.split(" ") data_dict['devnums'] = (devchar, int(major_str), int(minor_str)) elif field == "ModTime": data_dict['mtime'] = long(data) elif field == "Uid": data_dict['uid'] = int(data) elif field == "Gid": data_dict['gid'] = int(data) elif field == "Uname": if data == ":": data_dict['uname'] = None else: data_dict['uname'] = data elif field == "Gname": if data == ':': data_dict['gname'] = None else: data_dict['gname'] = data elif field == "Permissions": data_dict['perms'] = int(data) else: raise ParsingError("Unknown field in line '%s'" % line) return rpath.RORPath(index, data_dict)
|
assert dest_rp.isreg() dest_sig.setfile(Rdiff.get_signature(dest_rp))
|
if dest_rp.isreg(): dest_sig.setfile(Rdiff.get_signature(dest_rp)) else: dest_sig = dest_rp.getRORPath()
|
def get_one_sig(cls, dest_base_rpath, index, src_rorp, dest_rorp): """Return a signature given source and destination rorps""" if (Globals.preserve_hardlinks and Hardlink.islinked(src_rorp or dest_rorp)): dest_sig = rpath.RORPath(index) dest_sig.flaglinked(Hardlink.get_link_index(dest_sig)) elif dest_rorp: dest_sig = dest_rorp.getRORPath() if dest_rorp.isreg(): dest_rp = dest_base_rpath.new_index(index) assert dest_rp.isreg() dest_sig.setfile(Rdiff.get_signature(dest_rp)) else: dest_sig = rpath.RORPath(index) return dest_sig
|
def get_rf(self, index):
|
def get_rf(self, index, mir_rorp = None):
|
def get_rf(self, index): """Get a RestoreFile for given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list[0] if rf.index == index: if Globals.process_uid != 0: self.perm_changer(index) return rf elif rf.index > index: # Try to add earlier indices. But if first is # already from same directory, or we can't find any # from that directory, then we know it can't be added. if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
if not self.add_rfs(index): return None
|
if not self.add_rfs(index, mir_rorp): return None
|
def get_rf(self, index): """Get a RestoreFile for given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list[0] if rf.index == index: if Globals.process_uid != 0: self.perm_changer(index) return rf elif rf.index > index: # Try to add earlier indices. But if first is # already from same directory, or we can't find any # from that directory, then we know it can't be added. if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
if Globals.process_uid != 0: self.perm_changer(index)
|
if Globals.process_uid != 0: self.perm_changer(index, mir_rorp)
|
def get_rf(self, index): """Get a RestoreFile for given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list[0] if rf.index == index: if Globals.process_uid != 0: self.perm_changer(index) return rf elif rf.index > index: # Try to add earlier indices. But if first is # already from same directory, or we can't find any # from that directory, then we know it can't be added. if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
self.add_rfs(index)): return None
|
self.add_rfs(index, mir_rorp)): return None
|
def get_rf(self, index): """Get a RestoreFile for given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list[0] if rf.index == index: if Globals.process_uid != 0: self.perm_changer(index) return rf elif rf.index > index: # Try to add earlier indices. But if first is # already from same directory, or we can't find any # from that directory, then we know it can't be added. if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
rf = longname.update_rf(self.get_rf(index), mir_rorp,
|
rf = longname.update_rf(self.get_rf(index, mir_rorp), mir_rorp,
|
def get_fp(self, index, mir_rorp): """Return the file object (for reading) of given index""" rf = longname.update_rf(self.get_rf(index), mir_rorp, self.root_rf.mirror_rp) if not rf: log.Log("Error: Unable to retrieve data for file %s!\nThe " "cause is probably data loss from the backup repository." % (index and "/".join(index) or '.',), 2) return cStringIO.StringIO('') return rf.get_restore_fp()
|
def add_rfs(self, index):
|
def add_rfs(self, index, mir_rorp = None):
|
def add_rfs(self, index): """Given index, add the rfs in that same directory
|
def __call__(self, index):
|
def __call__(self, index, mir_rorp = None):
|
def __call__(self, index): """Given rpath, change permissions up to and including index""" old_index = self.current_index self.current_index = index if not index or index <= old_index: return self.restore_old(index) self.add_new(old_index, index)
|
rf = self.rf_list.pop(0) if rf.index < index: continue elif rf.index == index: return rf self.rf_list.insert(0, rf) if not self.add_rfs(index): return None
|
rf = self.rf_list[0] if rf.index == index: return rf elif rf.index > index: if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
def get_rf(self, index): """Return RestoreFile of given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list.pop(0) if rf.index < index: continue elif rf.index == index: return rf self.rf_list.insert(0, rf) if not self.add_rfs(index): return None
|
Log("Source may have carbonfile support, but support defaults to " "off.\n Use --carbonfile to enable.", 5)
|
log.Log("Source may have carbonfile support, but support " "defaults to off.\n Use --carbonfile to enable.", 5)
|
def set_carbonfile(self): self.update_triple(self.src_fsa.carbonfile, self.dest_fsa.carbonfile, ('carbonfile_active', 'carbonfile_write', 'carbonfile_conn')) if self.src_fsa.carbonfile and not Globals.carbonfile_active: Log("Source may have carbonfile support, but support defaults to " "off.\n Use --carbonfile to enable.", 5)
|
rp.conn.xattr.removexattr(rp.path, name)
|
try: rp.conn.xattr.removexattr(rp.path, name) except IOError, exc: if exc[0] == errno.EACCES: log.Log("Warning: unable to remove xattr %s from %s" % (name, rp.path), 7) continue else: raise
|
def clear_rp(self, rp): """Delete all the extended attributes in rpath""" for name in rp.conn.xattr.listxattr(rp.path): rp.conn.xattr.removexattr(rp.path, name)
|
if not self.src_fsa.case_sensitive and self.dest_fsa.case_sensitive:
|
if self.src_fsa.case_sensitive and not self.dest_fsa.case_sensitive:
|
def get_ctq_from_fsas(self): """Determine chars_to_quote just from filesystems, no ctq file""" if not self.src_fsa.case_sensitive and self.dest_fsa.case_sensitive: if self.dest_fsa.extended_filenames: return "A-Z;" # Quote upper case and quoting char else: return "^a-z0-9_ -." # quote everything but basic chars
|
except os.error: shutil.rmtree(self.path)
|
except os.error: if Globals.fsync_directories: self.fsync() self.conn.shutil.rmtree(self.path)
|
def delete(self): """Delete file at self.path. Recursively deletes directories.""" log.Log("Deleting %s" % self.path, 7) if self.isdir(): try: self.rmdir() except os.error: shutil.rmtree(self.path) else: self.conn.os.unlink(self.path) self.setdata()
|
elif dest_sig.isreg() and src_rp.isreg(): attach_diff(diff_rorp, src_rp, dest_sig)
|
def attach_diff(diff_rorp, src_rp, dest_sig): """Attach file of diff to diff_rorp, w/ error checking""" fileobj = robust.check_common_error( error_handler, Rdiff.get_delta_sigrp_hash, (dest_sig, src_rp)) if fileobj: diff_rorp.setfile(fileobj) diff_rorp.set_attached_filetype('diff') else: diff_rorp.zero() diff_rorp.set_attached_filetype('snapshot')
|
|
attach_snapshot(diff_rorp, src_rp)
|
if dest_sig.isreg(): attach_diff(diff_rorp, src_rp, dest_sig) else: attach_snapshot(diff_rorp, src_rp) else:
|
def attach_diff(diff_rorp, src_rp, dest_sig): """Attach file of diff to diff_rorp, w/ error checking""" fileobj = robust.check_common_error( error_handler, Rdiff.get_delta_sigrp_hash, (dest_sig, src_rp)) if fileobj: diff_rorp.setfile(fileobj) diff_rorp.set_attached_filetype('diff') else: diff_rorp.zero() diff_rorp.set_attached_filetype('snapshot')
|
else: diff_rorp.set_attached_filetype('snapshot')
|
diff_rorp.set_attached_filetype('snapshot')
|
def attach_diff(diff_rorp, src_rp, dest_sig): """Attach file of diff to diff_rorp, w/ error checking""" fileobj = robust.check_common_error( error_handler, Rdiff.get_delta_sigrp_hash, (dest_sig, src_rp)) if fileobj: diff_rorp.setfile(fileobj) diff_rorp.set_attached_filetype('diff') else: diff_rorp.zero() diff_rorp.set_attached_filetype('snapshot')
|
rp = subdir.append(filename) try: rp.touch() except IOError:
|
try: rp = subdir.append(filename) rp.touch() except (IOError, OSError):
|
def supports_unusual_chars(): """Test handling of several chars sometimes not supported""" for filename in [':', '\\', chr(175)]: rp = subdir.append(filename) try: rp.touch() except IOError: assert not rp.lstat() return 0 assert rp.lstat() rp.delete() return 1
|
assert rp.lstat() rp.delete()
|
else: assert rp.lstat() rp.delete()
|
def supports_unusual_chars(): """Test handling of several chars sometimes not supported""" for filename in [':', '\\', chr(175)]: rp = subdir.append(filename) try: rp.touch() except IOError: assert not rp.lstat() return 0 assert rp.lstat() rp.delete() return 1
|
def test_acl_quoting2(self): """This string used to segfault the quoting code, try now""" s = '\xd8\xab\xb1Wb\xae\xc5]\x8a\xbb\x15v*\xf4\x0f!\xf9>\xe2Y\x86\xbb\xab\xdbp\xb0\x84\x13k\x1d\xc2\xf1\xf5e\xa5U\x82\x9aUV\xa0\xf4\xdf4\xba\xfdX\x03\x82\x07s\xce\x9e\x8b\xb34\x04\x9f\x17 \xf4\x8f\xa6\xfa\x97\xab\xd8\xac\xda\x85\xdcKvC\xfa' quoted = C.acl_quote(s) assert C.acl_unquote(quoted) == s def test_acl_quoting_equals(self): """Make sure the equals character is quoted""" assert C.acl_quote('=') != '='
|
def test_acl_quoting(self): """Test the acl_quote and acl_unquote functions""" assert C.acl_quote('foo') == 'foo', C.acl_quote('foo') assert C.acl_quote('\n') == '\\012', C.acl_quote('\n') assert C.acl_unquote('\\012') == '\n' s = '\\\n\t\145\n\01==' assert C.acl_unquote(C.acl_quote(s)) == s
|
|
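The acl_quote/acl_unquote tests above pin down the contract: newline becomes the octal escape '\\012', '=' must not survive unquoted, and quoting must round-trip. A pure-Python sketch that satisfies those tests; the exact character set the C module quotes is an assumption:

import re

def acl_quote(s):
    # Escape backslash, '=', and control characters as three-digit octal.
    return re.sub(r"[\\=\x00-\x1f]", lambda m: "\\%03o" % ord(m.group()), s)

def acl_unquote(s):
    return re.sub(r"\\([0-7]{3})", lambda m: chr(int(m.group(1), 8)), s)

assert acl_quote('\n') == '\\012'
assert acl_quote('=') != '='
s = '\\\n\t\145\n\01=='
assert acl_unquote(acl_quote(s)) == s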
log.FatalError("Error '%s' reading mapping file '%s'" %
|
Log.FatalError("Error '%s' reading mapping file '%s'" %
|
def get_string_from_file(filename): if not filename: return None rp = rpath.RPath(Globals.local_connection, filename) try: return rp.get_data() except OSError, e: log.FatalError("Error '%s' reading mapping file '%s'" % (str(e), filename))
|
datadir = self.li_getdatadir(rootrp, """Unable to open rdiff-backup-data dir. Try finding the increments first using --list-increments.""")
|
datadir = rootrp.append("rdiff-backup-data") if not datadir.lstat() or not datadir.isdir(): Log.FatalError("Unable to open rdiff-backup-data dir %s" % (datadir.path,))
|
def RemoveOlderThan(self, rootrp): """Remove all increment files older than a certain time""" datadir = self.li_getdatadir(rootrp, """Unable to open rdiff-backup-data dir.
|
incobjs = filter(lambda x: x.time < time, Manage.get_incobjs(datadir)) incobjs_time = ", ".join(map(IncObj.pretty_time, incobjs)) if not incobjs:
|
itimes = [Time.stringtopretty(inc.getinctime()) for inc in Restore.get_inclist(datadir.append("increments")) if Time.stringtotime(inc.getinctime()) < time] if not itimes:
|
def RemoveOlderThan(self, rootrp): """Remove all increment files older than a certain time""" datadir = self.li_getdatadir(rootrp, """Unable to open rdiff-backup-data dir.
|
elif len(incobjs) > 1 and not self.force: Log.FatalError("Found %d relevant increments, dated %s.\n" "If you want to delete multiple increments in this way, " "use the --force." % (len(incobjs), incobjs_time)) Log("Deleting increment%sat %s" % (len(incobjs) == 1 and " " or "s ", incobjs_time), 3)
|
inc_pretty_time = "\n".join(itimes) if len(itimes) > 1 and not self.force: Log.FatalError("Found %d relevant increments, dated:\n%s" "\nIf you want to delete multiple increments in this way, " "use the --force." % (len(itimes), inc_pretty_time)) Log("Deleting increment%sat times:\n%s" % (len(itimes) == 1 and " " or "s ", inc_pretty_time), 3)
|
def RemoveOlderThan(self, rootrp): """Remove all increment files older than a certain time""" datadir = self.li_getdatadir(rootrp, """Unable to open rdiff-backup-data dir.
|
log.Log.FatalError("""New quoting requirements!
|
if Globals.chars_to_quote is None: log.Log.FatalError("""New quoting requirements!
|
def compare_ctq_file(self, rbdir, suggested_ctq): """Compare ctq file with suggested result, return actual ctq""" ctq_rp = rbdir.append("chars_to_quote") if not ctq_rp.lstat(): if Globals.chars_to_quote is None: actual_ctq = suggested_ctq else: actual_ctq = Globals.chars_to_quote ctq_rp.write_string(actual_ctq) return actual_ctq
|
else: log.Log.FatalError("""New quoting requirements
|
elif Globals.chars_to_quote is None: log.Log.FatalError("""New quoting requirements
|
def write_new_chars(): """Replace old chars_to_quote file with new value""" if ctq_rp.lstat(): ctq_rp.delete() fp = ctq_rp.open("wb") fp.write(self.chars_to_quote) assert not fp.close()
|
"/../rdiff-backup -v3 --no-compare-inode "
|
"/../rdiff-backup -v9 --no-compare-inode "
|
def reset_schema(self): self.rb_schema = (SourceDir + "/../rdiff-backup -v3 --no-compare-inode " "--remote-schema './chdir-wrapper2 %s' ")
|
for sub_rf in rf.yield_sub_rfs(): yield sub_rf.get_attribs()
|
for sub_rf in rf.yield_sub_rfs(): for attribs in cls.get_rorp_iter_from_rf(sub_rf): yield attribs
|
def get_rorp_iter_from_rf(cls, rf): """Recursively yield mirror rorps from rf""" rorp = rf.get_attribs() yield rorp if rorp.isdir(): for sub_rf in rf.yield_sub_rfs(): yield sub_rf.get_attribs()
|
if time.daylight: utc_in_secs = time.mktime(timetuple) - time.altzone
|
if dst_in_effect: utc_in_secs = time.mktime(timetuple) - time.altzone
|
def stringtotime(timestring): """Return time in seconds from w3 timestring If there is an error parsing the string, or it doesn't look like a w3 datetime string, return None. """ try: date, daytime = timestring[:19].split("T") year, month, day = map(int, date.split("-")) hour, minute, second = map(int, daytime.split(":")) assert 1900 < year < 2100, year assert 1 <= month <= 12 assert 1 <= day <= 31 assert 0 <= hour <= 23 assert 0 <= minute <= 59 assert 0 <= second <= 61 # leap seconds timetuple = (year, month, day, hour, minute, second, -1, -1, -1) if time.daylight: utc_in_secs = time.mktime(timetuple) - time.altzone else: utc_in_secs = time.mktime(timetuple) - time.timezone return long(utc_in_secs) + tzdtoseconds(timestring[19:]) except (TypeError, ValueError, AssertionError): return None
|
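stringtotime above parses 'YYYY-MM-DDTHH:MM:SS' plus a timezone designator and converts via mktime with timezone/altzone corrections. An equivalent sketch using calendar.timegm, which sidesteps the DST juggling; this is hedged as an alternative formulation, not the project's code:

import calendar

def w3_to_epoch(timestring):
    """Parse 'YYYY-MM-DDTHH:MM:SS' + ('Z' or '[+/-]hh:mm') to epoch seconds."""
    date, clock = timestring[:19].split("T")
    year, month, day = map(int, date.split("-"))
    hour, minute, second = map(int, clock.split(":"))
    utc = calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0))
    tzd = timestring[19:]
    if tzd in ("", "Z"):
        return utc
    sign = -1 if tzd.startswith("-") else 1
    hours, minutes = map(int, tzd[1:].split(":"))
    return utc - sign * (hours * 3600 + minutes * 60)

# w3_to_epoch("1970-01-01T01:00:00+01:00") == 0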
if time.daylight: offset = -1 * time.altzone/60 else: offset = -1 * time.timezone/60
|
if dst_in_effect: offset = -time.altzone/60 else: offset = -time.timezone/60
|
def gettzd(): """Return w3's timezone identification string. Expressed as [+/-]hh:mm. For instance, PST is -08:00. The zone coincides with what localtime(), etc., use. """ if time.daylight: offset = -1 * time.altzone/60 else: offset = -1 * time.timezone/60 if offset > 0: prefix = "+" elif offset < 0: prefix = "-" else: return "Z" # time is already in UTC hours, minutes = map(abs, divmod(offset, 60)) assert 0 <= hours <= 23 assert 0 <= minutes <= 59 return "%s%02d:%02d" % (prefix, hours, minutes)
|
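gettzd above derives the W3C zone designator from the C-level timezone globals. A compact sketch of the same computation; whether time.daylight alone is the right DST test is exactly what the rem/add pair above changes, so the dst_in_effect flag is left as a parameter here:

import time

def gettzd_sketch(dst_in_effect=bool(time.daylight)):
    """Return the local zone as '[+/-]hh:mm', or 'Z' when already UTC."""
    offset = -(time.altzone if dst_in_effect else time.timezone) // 60
    if offset == 0:
        return "Z"
    hours, minutes = divmod(abs(offset), 60)
    return "%s%02d:%02d" % ("+" if offset > 0 else "-", hours, minutes)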
elif ((key == 'uname' or key == 'gname') and not other.data.has_key(key)): pass
|
elif key == 'uname' or key == 'gname': other_name = other.data.get(key, None) if (other_name and other_name != "None" and other_name != self.data[key]): return None
|
def __eq__(self, other): """True iff the two rorpaths are equivalent""" if self.index != other.index: return None
|
if self.data[key] != other.data[key]: return None
|
if self.data[key] != other_val: return None
|
def __eq__(self, other): """True iff the two rorpaths are equivalent""" if self.index != other.index: return None
|
"rdiff-with --check-destination-dir option to revert directory "
|
"with --check-destination-dir option to revert directory "
|
def restore_check_backup_dir(mirror_root, src_rp = None, restore_as_of = 1): """Make sure backup dir root rpin is in consistent state""" if not restore_as_of and not src_rp.isincfile(): Log.FatalError("""File %s does not look like an increment file.
|
collated, Globals.pipeline_max_length*4)
|
collated, Globals.pipeline_max_length*4, baserp)
|
def set_rorp_cache(cls, baserp, source_iter, for_increment): """Initialize cls.CCPP, the destination rorp cache
|
"""Cache a collated iter of (source_rorp, dest_rp) pairs
|
"""Cache a collated iter of (source_rorp, dest_rorp) pairs
|
def patch_and_increment(cls, dest_rpath, source_diffiter, inc_rpath): """Patch dest_rpath with rorpiter of diffs and write increments""" ITR = rorpiter.IterTreeReducer(IncrementITRB, [dest_rpath, inc_rpath, cls.CCPP]) for diff in rorpiter.FillInIter(source_diffiter, dest_rpath): log.Log("Processing changed file " + diff.get_indexpath(), 5) ITR(diff.index, diff) ITR.Finish() cls.CCPP.close() dest_rpath.setdata()
|
def __init__(self, collated_iter, cache_size):
|
def __init__(self, collated_iter, cache_size, dest_root_rp):
|
def __init__(self, collated_iter, cache_size): """Initialize new CCWP.""" self.iter = collated_iter # generates (source_rorp, dest_rorp) pairs self.cache_size = cache_size self.statfileobj = statistics.init_statfileobj() if Globals.file_statistics: statistics.FileStats.init() metadata.OpenMetadata()
|
cmdlist = ['mknod', self.path, type, str(major), str(minor)] if self.conn.os.spawnvp(os.P_WAIT, 'mknod', cmdlist) != 0: raise RPathException("Error running %s" % cmdlist) if type == 'c': datatype = 'chr' elif type == 'b': datatype = 'blk'
|
if type == 'c': datatype = 'chr' mode = stat.S_IFCHR | 0600 elif type == 'b': datatype = 'blk' mode = stat.S_IFBLK | 0600
|
def makedev(self, type, major, minor): """Make a special file with specified type, and major/minor nums""" cmdlist = ['mknod', self.path, type, str(major), str(minor)] if self.conn.os.spawnvp(os.P_WAIT, 'mknod', cmdlist) != 0: raise RPathException("Error running %s" % cmdlist) if type == 'c': datatype = 'chr' elif type == 'b': datatype = 'blk' else: raise RPathException self.setdata()
|
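The makedev context above shells out to mknod(1), and the add side introduces explicit stat mode bits, pointing toward direct node creation. A sketch of that direct approach with os.mknod (requires root; the helper name is illustrative, not from the source):

import os
import stat

def makedev_sketch(path, devtype, major, minor):
    """Create a character ('c') or block ('b') device node at path."""
    mode = (stat.S_IFCHR if devtype == 'c' else stat.S_IFBLK) | 0o600
    os.mknod(path, mode, os.makedev(major, minor))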
assert conn.pow(2,3) == 8 assert conn.os.path.join("a", "b") == "a/b" version = conn.reval("lambda: Globals.version")
|
assert conn.Globals.get('current_time') is None assert type(conn.os.getuid()) is int version = conn.Globals.get('version')
|
def test_connection(conn_number): """Test connection. conn_number 0 is the local connection""" print "Testing server started by: ", __conn_remote_cmds[conn_number] conn = Globals.connections[conn_number] try: assert conn.pow(2,3) == 8 assert conn.os.path.join("a", "b") == "a/b" version = conn.reval("lambda: Globals.version") except: sys.stderr.write("Server tests failed\n") raise if not version == Globals.version: print """Server may work, but there is a version mismatch:
|
elif key == 'devloc' or key == 'nlink': pass
|
elif key == 'nlink': pass
|
def __eq__(self, other): """True iff the two rorpaths are equivalent""" if self.index != other.index: return None
|
elif (key == 'inode' and
|
elif ((key == 'inode' or key == 'devloc') and
|
def __eq__(self, other): """True iff the two rorpaths are equivalent""" if self.index != other.index: return None
|
return ea.index == self.index and ea.attr_dict == self.attr_dict
|
return ea.attr_dict == self.attr_dict
|
def __eq__(self, ea): """Equal if all attributes and index are equal""" assert isinstance(ea, ExtendedAttributes) return ea.index == self.index and ea.attr_dict == self.attr_dict
|
if self.index != acl.index: return 0
|
def __eq__(self, acl): """Compare self and other access control list
|
|
if self.index != acl.index: print "index %s not equal to index %s" % (self.index, acl.index) return 0
|
def eq_verbose(self, acl): """Returns same as __eq__ but print explanation if not equal""" if self.index != acl.index: print "index %s not equal to index %s" % (self.index, acl.index) return 0 if not self.cmp_entry_list(self.entry_list, acl.entry_list): print "ACL entries for %s compare differently" % (self.index,) return 0 if not self.cmp_entry_list(self.default_entry_list, acl.default_entry_list): print "Default ACL entries for %s do not compare" % (self.index,) return 0 return 1
|
|
So here we assume all rdiff-backup events were recorded in "increments" increments, and if it's in-between we pick the older one here.
|
So if restore_to_time is inbetween two increments, return the older one.
|
def get_rest_time(cls, restore_to_time): """Return older time, if restore_to_time is in between two inc times
|
if not _mirror_time: return_list = [cls.get_mirror_time()] else: return_list = [_mirror_time]
|
if not _mirror_time: d = {cls.get_mirror_time(): None} else: d = {_mirror_time: None}
|
def get_increment_times(cls, rp = None): """Return list of times of backups, including current mirror""" if not _mirror_time: return_list = [cls.get_mirror_time()] else: return_list = [_mirror_time] if not rp or not rp.index: rp = Globals.rbdir.append("increments") for inc in get_inclist(rp): return_list.append(inc.getinctime()) return return_list
|
for inc in get_inclist(rp): return_list.append(inc.getinctime())
|
for inc in get_inclist(rp): d[inc.getinctime()] = None for inc in get_inclist(Globals.rbdir.append("mirror_metadata")): d[inc.getinctime()] = None return_list = d.keys() return_list.sort()
|
def get_increment_times(cls, rp = None): """Return list of times of backups, including current mirror""" if not _mirror_time: return_list = [cls.get_mirror_time()] else: return_list = [_mirror_time] if not rp or not rp.index: rp = Globals.rbdir.append("increments") for inc in get_inclist(rp): return_list.append(inc.getinctime()) return return_list
|
self.name2id_dict[name] = out_id
|
if out_id is not None: self.name2id_dict[name] = out_id
|
def get_id_from_name(self, name): """Return mapped id from name only, or None if cannot""" try: return self.name2id_dict[name] except KeyError: out_id = self.find_id_from_name(name) self.name2id_dict[name] = out_id return out_id
|
if Globals.process_uid != 0: self.perm_changer(rf.mirror_rp)
|
if Globals.process_uid != 0: self.perm_changer(index)
|
def get_rf(self, index): """Return RestoreFile of given index, or None""" while 1: if not self.rf_list: if not self.add_rfs(index): return None rf = self.rf_list[0] if rf.index == index: if Globals.process_uid != 0: self.perm_changer(rf.mirror_rp) return rf elif rf.index > index: # Try to add earlier indices. But if first is # already from same directory, or we can't find any # from that directory, then we know it can't be added. if (index[:-1] == rf.index[:-1] or not self.add_rfs(index)): return None else: del self.rf_list[0]
|
if Globals.process_uid != 0: self.perm_changer(temp_rf.mirror_rp)
|
def add_rfs(self, index): """Given index, add the rfs in that same directory
|
|
def __call__(self, rp): """Given rpath, change permissions up and including rp""" index, old_index = rp.index, self.current_index
|
def __call__(self, index): """Given rpath, change permissions up to and including index""" old_index = self.current_index
|
def __call__(self, rp): """Given rpath, change permissions up and including rp""" index, old_index = rp.index, self.current_index self.current_index = index if not index or index == old_index: return assert index > old_index, (index, old_index) self.restore_old(rp, index) self.add_new(rp, old_index, index)
|
if not index or index == old_index: return assert index > old_index, (index, old_index) self.restore_old(rp, index) self.add_new(rp, old_index, index) def restore_old(self, rp, index):
|
if not index or index <= old_index: return self.restore_old(index) self.add_new(old_index, index) def restore_old(self, index):
|
def __call__(self, rp): """Given rpath, change permissions up and including rp""" index, old_index = rp.index, self.current_index self.current_index = index if not index or index == old_index: return assert index > old_index, (index, old_index) self.restore_old(rp, index) self.add_new(rp, old_index, index)
|
def add_new(self, rp, old_index, index):
|
def add_new(self, old_index, index):
|
def add_new(self, rp, old_index, index): """Change permissions of directories between old_index and index""" for rp in self.get_new_rp_list(rp, old_index, index): if ((rp.isreg() and not rp.readable()) or (rp.isdir() and not rp.hasfullperms())): old_perms = rp.getperms() self.open_index_list.insert(0, (index, rp, old_perms)) if rp.isreg(): rp.chmod(0400 | old_perms) else: rp.chmod(0700 | old_perms)
|
for rp in self.get_new_rp_list(rp, old_index, index):
|
for rp in self.get_new_rp_list(old_index, index):
|
def add_new(self, rp, old_index, index): """Change permissions of directories between old_index and index""" for rp in self.get_new_rp_list(rp, old_index, index): if ((rp.isreg() and not rp.readable()) or (rp.isdir() and not rp.hasfullperms())): old_perms = rp.getperms() self.open_index_list.insert(0, (index, rp, old_perms)) if rp.isreg(): rp.chmod(0400 | old_perms) else: rp.chmod(0700 | old_perms)
|
self.open_index_list.insert(0, (index, rp, old_perms))
|
self.open_index_list.insert(0, (rp.index, rp, old_perms))
|
def add_new(self, rp, old_index, index): """Change permissions of directories between old_index and index""" for rp in self.get_new_rp_list(rp, old_index, index): if ((rp.isreg() and not rp.readable()) or (rp.isdir() and not rp.hasfullperms())): old_perms = rp.getperms() self.open_index_list.insert(0, (index, rp, old_perms)) if rp.isreg(): rp.chmod(0400 | old_perms) else: rp.chmod(0700 | old_perms)
|
def get_new_rp_list(self, rp, old_index, index): """Return list of new rp's between old_index and index"""
|
def get_new_rp_list(self, old_index, index): """Return list of new rp's between old_index and index Do this lazily so that the permissions on the outer directories are fixed before we need the inner dirs. """
|
def get_new_rp_list(self, rp, old_index, index): """Return list of new rp's between old_index and index""" for i in range(len(index)-1, -1, -1): if old_index[:i] == index[:i]: common_prefix_len = i break else: assert 0
|
new_rps = [] for total_len in range(common_prefix_len+1, len(index)): new_rps.append(self.root_rp.new_index(index[:total_len])) new_rps.append(rp) return new_rps
|
for total_len in range(common_prefix_len+1, len(index)+1): yield self.root_rp.new_index(index[:total_len])
|
def get_new_rp_list(self, rp, old_index, index): """Return list of new rp's between old_index and index""" for i in range(len(index)-1, -1, -1): if old_index[:i] == index[:i]: common_prefix_len = i break else: assert 0
|
extended_filename = ':\\' + chr(175)
|
extended_filename = ':\\ ' + chr(225) + chr(132) + chr(137)
|
def set_extended_filenames(self, subdir): """Set self.extended_filenames by trying to write a path""" assert not self.read_only
|
Log.FatalError("--never-drop-acls specified, but ACL support\n" "missing from destination filesystem")
|
log.Log.FatalError("--never-drop-acls specified, but ACL support\n" "missing from destination filesystem")
|
def set_acls(self): self.update_triple(self.src_fsa.acls, self.dest_fsa.acls, ('acls_active', 'acls_write', 'acls_conn')) if Globals.never_drop_acls and not Globals.acls_active: Log.FatalError("--never-drop-acls specified, but ACL support\n" "missing from destination filesystem")
|
elif success == 1 or success == 2:
|
elif success == 1: metadata_rorp = source_rorp else: metadata_rorp = None if success == 1 or success == 2:
|
def post_process(self, source_rorp, dest_rorp, changed, success, inc): """Post process source_rorp and dest_rorp.
|
metadata_rorp = source_rorp else: metadata_rorp = None
|
def post_process(self, source_rorp, dest_rorp, changed, success, inc): """Post process source_rorp and dest_rorp.
|
|
assert 700000 <= s2.SourceFileSize < 750000
|
assert 700000 <= s2.SourceFileSize < 750000, s2.SourceFileSize
|
def testStatistics(self): """Test the writing of statistics
|
while 1:
|
for i in range(5):
|
def run(qDirectory,maxSvrThreads,maxUsrThreads,sleepInterval,execPath): displayParams(qDirectory,maxSvrThreads,maxUsrThreads,sleepInterval,execPath) #Check to see if already running. lastPID = "0" try: f=open(qDirectory+"tfQManager.pid",'r') lastPID = f.readline().strip() f.close() if __debug__: traceMsg("Last QManager pid" + str(lastPID)) except: pass if (int(lastPID) > 0): if (checkPIDStatus(lastPID) > 0): if __debug__: traceMsg("Already Running on pid:" + lastPID) raise KeyboardInterrupt if __debug__: traceMsg("QManager Starting") f=open(qDirectory+"tfQManager.pid",'w') f.write(str(getpid()) + "\n") f.flush() f.close() if __debug__: traceMsg("QManager PID :" + str(getpid())) # Extract from the execPath the Btphptornado.py script line. # this will be used during the process Counts to ensure we are # unique from other running instances. ePath = execPath.split(" ") for x in ePath: if (re.search('btphptornado', x) > 0 ): btphp = x if __debug__: traceMsg("btphp ->"+btphp) if (re.search('btphptornado', btphp) > 0 ): try: while 1: threadCount = checkThreadCount(btphp) if __debug__: traceMsg("CurrentThreadCount = " + str( threadCount )) # # Start looping until we have maxSvrThreads. # Or no Qinfo Files. # while int(threadCount) <= int(maxSvrThreads): try: # # Get the Next File. # Check to see if we got a file back. # if not break out of looping we don't have any files. # fileList = [] fileList = getFileList(qDirectory) for currentFile in fileList: if currentFile == "": break # set the name of the current statsFile statsFile = currentFile.replace('/queue','').strip('.Qinfo') if __debug__: traceMsg("statsFile = " + statsFile) # # get the User name if we didn't get one # something was wrong with this file. # currentUser = getUserName(statsFile) if currentUser == "": if __debug__: traceMsg("No User Found : " + currentFile) # Prep StatsFile updateStats(statsFile, '0') removeFile(currentFile) break else: if __debug__: traceMsg("Current User: " + currentUser) # # Now check user thread count # usrThreadCount = getUserThreadCount(currentUser, btphp) # # check UserThreadCount # if int(usrThreadCount) < int(maxUsrThreads): # # Now check to see if we start a new thread will we be over the max ? # threadCount = checkThreadCount(btphp) if int(threadCount) + 1 <= int(maxSvrThreads): if int(usrThreadCount) + 1 <= int(maxUsrThreads): cmdToRun = getCommandToRun(currentFile) #if __debug__: traceMsg(" Cmd :" + cmdToRun) if (re.search(currentUser,cmdToRun) == 0): if __debug__: traceMsg("Incorrect User found in Cmd") cmdToRun = '' if (re.search('\|',cmdToRun) > 0): if __debug__: traceMsg(" Failed pipe ") cmdToRun = '' else: cmdToRun = execPath + cmdToRun cmdToRun = cmdToRun.replace('TFQUSERNAME', currentUser) #if __debug__: traceMsg(" Cmd :" + cmdToRun) if cmdToRun != "": #PrepStatsFile updateStats(statsFile, '1') if __debug__: traceMsg("Fire off command") try: garbage = doCommand(cmdToRun) # # wait until the torrent process starts # and creates a pid file. # once this happens we can remove the Qinfo. # while 1: try: time.sleep(2) f=open(statsFile+".pid",'r') f.close() break except: continue # Ok this one started Remove Qinfo File. if __debug__: traceMsg("Removing : " + currentFile) removeFile(currentFile) except: continue else: # # Something wrong with command file. # if __debug__: traceMsg("Unable to obtain valid cmdToRun : " + currentFile) removeFile(currentFile) else: if __debug__: traceMsg("Skipping this file since the User has too many threads") if __debug__: traceMsg("Skipping : " + currentFile) else: if __debug__: traceMsg("Skipping this file since the Server has too many threads") if __debug__: traceMsg("Skipping : " + currentFile) break except: break threadCount = checkThreadCount(btphp) if __debug__: traceMsg("CurrentThreadCount = " + str( threadCount )) if __debug__: traceMsg("Sleeping...") time.sleep(float(sleepInterval)) except: removeFile(qDirectory+"tfQManager.pid") else: LOG = True if __debug__: traceMsg("Only supported client is btphptornado.") removeFile(qDirectory+"tfQManager.pid")
|
self.upTotal = statistics['upTotal']
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
self.d.display({'activity':_("Starting ..."),
|
self.d.display({'activity':_("initializing"),
|
def start_torrent(self,metainfo,save_incomplete_as,save_as): """Tells the MultiTorrent to begin downloading.""" try: self.d.display({'activity':_("Starting ..."), 'fractionDone':0}) multitorrent = self.multitorrent df = multitorrent.create_torrent(metainfo, save_incomplete_as, save_as) df.addErrback( wrap_log('Failed to start torrent', self.logger)) def create_finished(torrent): self.torrent = torrent if self.torrent.is_initialized(): multitorrent.start_torrent(self.torrent.infohash) else: # HEREDAVE: why should this set the doneflag? self.core_doneflag.set() # e.g., if already downloading... df.addCallback( create_finished ) except KeyboardInterrupt: raise except UserFailure, e: self.logger.error( "Failed to create torrent: " + unicode(e.args[0]) ) except Exception, e: self.logger.error( "Failed to create torrent", exc_info = e ) return
|
FILE = open(self.statFile,"w") FILE.write(repr(self.state)+"\n") FILE.write(self.percentDone+"\n") FILE.write(self.timeEst+"\n") FILE.write(self.downRate+"\n") FILE.write(self.upRate+"\n") FILE.write(self.tfOwner+"\n") FILE.write(self.seeds+"\n") FILE.write(self.peers+"\n") FILE.write(self.shareRating+"\n") FILE.write(self.seedLimit+"\n") FILE.write(repr(self.upTotal)+"\n") FILE.write(repr(self.downTotal)+"\n") FILE.write(repr(self.fileSize)[:-1]) FILE.flush() FILE.close()
|
try: FILE = open(self.statFile,"w") FILE.write(repr(self.state)+"\n") FILE.write(self.percentDone+"\n") FILE.write(self.timeEst+"\n") FILE.write(self.downRate+"\n") FILE.write(self.upRate+"\n") FILE.write(self.tfOwner+"\n") FILE.write(self.seeds+"\n") FILE.write(self.peers+"\n") FILE.write(self.shareRating+"\n") FILE.write(self.seedLimit+"\n") FILE.write(repr(self.upTotal)+"\n") FILE.write(repr(self.downTotal)+"\n") FILE.write(repr(self.fileSize)) FILE.flush() FILE.close() except: pass
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
seedLimitReached = 0
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
upTotal = statistics['upTotal']
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
if upTotal is not None: if upTotal > 0: self.shareRating = _("%.3f") % (upTotal / downTotal) else: self.shareRating = "0"
|
if upTotal > 0: self.shareRating = _("%.3f") % (upTotal / downTotal) if self.done and self.seedLimit > 0: currentShareRating = (int) (upTotal / downTotal) if currentShareRating >= self.seedLimit: seedLimitReached = 1 app.logger.error("seed-Limit Reached, setting shutdown-flag...")
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
if not self.done: self.seedStatus = _("%d") % statistics['numSeeds'] else: self.seedStatus = "" self.peerStatus = _("%d") % statistics['numPeers']
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
upTotal = 0
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
self.seedStatus = "0"
|
seeds = None seeds = statistics.get('numSeeds') if seeds is None: seeds = 0 numCopies = statistics.get('numCopies') if numCopies is not None: seeds += numCopies self.seedStatus = _("%d") % seeds peers = None peers = statistics.get('numPeers') if peers is not None: self.peerStatus = _("%d") % peers else:
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
app.percentDone = self.percentDone app.shareRating = self.shareRating app.upTotal = upTotal app.downTotal = downTotal
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
running = 0 try: FILE = open(self.statFile, 'r') running = FILE.read(1) FILE.close() except: running = 0
|
if seedLimitReached == 0: running = '0' try: FILE = open(self.statFile, 'r') running = FILE.read(1) FILE.close() except: running = '0' else: running = '0'
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
FILE.write(self.d.percentDone+"\n")
|
pcts = "-"+self.d.percentDone pctf = float(pcts) pctf -= 100 FILE.write(str(pctf)) FILE.write("\n")
|
def shutdown(): print "shutdown." self.d.display({'activity':_("shutting down"), 'fractionDone':0}) if self.multitorrent: df = self.multitorrent.shutdown() stop_rawserver = lambda *a : rawserver.stop() df.addCallbacks(stop_rawserver, stop_rawserver) else: rawserver.stop()
|
s = str(n) size = s[-3:] while len(s) > 3: s = s[:-3] size = '%s,%s' % (s[-3:], size) size = '%s (%s)' % (size, str(Size(n))) return size
|
return int(n)
|
def fmtsize(n): s = str(n) size = s[-3:] while len(s) > 3: s = s[:-3] size = '%s,%s' % (s[-3:], size) size = '%s (%s)' % (size, str(Size(n))) return size
|
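fmtsize above groups digits in thousands before appending a human-readable Size(). The grouping loop in isolation, plus the modern one-liner that matches its behavior on non-negative integers:

def group_thousands(n):
    """1234567 -> '1,234,567', same loop shape as fmtsize above."""
    s = str(n)
    out = s[-3:]
    while len(s) > 3:
        s = s[:-3]
        out = '%s,%s' % (s[-3:], out)
    return out

assert group_thousands(1234567) == '1,234,567' == '{:,}'.format(1234567)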
self.fileSize = ''
|
self.fileSize = 0
|
def __init__(self): self.done = '0' self.state = 1 self.percentDone = '' self.timeEst = 'Starting ...' self.downRate = '0.0 KB/s' self.upRate = '0.0 KB/s' self.tfOwner = config['tf_owner'] self.shareRating = '' self.seeds = "0" self.peers = "0" self.errors = [] self.file = '' self.downloadTo = '' self.fileSize = '' self.numpieces = 0 self.seedLimit = config['seed_limit'] self.statFile = config['stat_file'] self.upTotal = "0" self.downTotal = "0"
|
FILE.write(repr(self.fileSize))
|
FILE.write(repr(self.fileSize)[:-1])
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
self.timeEst = '' self.downRate = '---' self.upRate = '---'
|
self.timeEst = 'Starting ...' self.downRate = '0.0 kB/s' self.upRate = '0.0 kB/s' self.tfOwner = config['tf_owner']
|
def __init__(self): self.done = False self.percentDone = '' self.timeEst = '' self.downRate = '---' self.upRate = '---' self.shareRating = '' self.seedStatus = '' self.peerStatus = '' self.errors = [] self.file = '' self.downloadTo = '' self.fileSize = '' self.numpieces = 0
|
self.downRate = '---'
|
self.downRate = '0.0 kB/s'
|
def finished(self): self.done = True self.downRate = '---' self.display({'activity':_("download succeeded"), 'fractionDone':1})
|
print '\n\n\n\n'
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
|
self.downRate = '%.1f KB/s' % (downRate / (1 << 10))
|
self.downRate = '%.1f kB/s' % (downRate / (1 << 10))
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
self.upRate = '%.1f KB/s' % (upRate / (1 << 10))
|
self.upRate = '%.1f kB/s' % (upRate / (1 << 10)) upTotal = None downTotal = None
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
self.seedStatus = _("%d seen now") % statistics['numSeeds']
|
self.seedStatus = _("%d") % statistics['numSeeds']
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
self.peerStatus = _("%d seen now") % statistics['numPeers'] if not self.errors: print _("Log: none")
|
self.peerStatus = _("%d") % statistics['numPeers']
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
print _("Log:") for err in self.errors[-4:]: print err print print _("saving: "), self.file print _("file size: "), self.fileSize print _("percent done: "), self.percentDone print _("time left: "), self.timeEst print _("download to: "), self.downloadTo print _("download rate: "), self.downRate print _("upload rate: "), self.upRate print _("share rating: "), self.shareRating print _("seed status: "), self.seedStatus print _("peer status: "), self.peerStatus
|
upTotal = 0 downTotal = 0 self.shareRating = "oo" self.seedStatus = "0" self.peerStatus = "0" app.percentDone = self.percentDone app.shareRating = self.shareRating app.upTotal = upTotal app.downTotal = downTotal running = 0 try: FILE = open(self.statFile, 'r') running = FILE.read(1) FILE.close() except: running = 0 if running == '0': app.logger.error("shutting down...") self.state = 0 df = app.multitorrent.shutdown() stop_rawserver = lambda *a : app.multitorrent.rawserver.stop() df.addCallbacks(stop_rawserver, stop_rawserver) else: try: FILE = open(self.statFile,"w") FILE.write(repr(self.state)+"\n") FILE.write(self.percentDone+"\n") FILE.write(self.timeEst+"\n") FILE.write(self.downRate+"\n") FILE.write(self.upRate+"\n") FILE.write(self.tfOwner+"\n") FILE.write(self.seedStatus+"\n") FILE.write(self.peerStatus+"\n") FILE.write(self.shareRating+"\n") FILE.write(self.seedLimit+"\n") FILE.write(repr(upTotal)+"\n") FILE.write(repr(downTotal)+"\n") FILE.write(repr(self.fileSize)) if self.errors: FILE.write("\n") for err in self.errors[0:]: FILE.write(err) FILE.flush() FILE.close() except Exception, e: app.logger.error( "Failed to write stat-file", exc_info = e )
|
def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew')
|
pcts = "-"+self.d.percentDone
|
pcts = "-"+self.percentDone
|
def shutdown(): print "shutdown." self.d.display({'activity':_("shutting down"), 'fractionDone':0}) if self.multitorrent: df = self.multitorrent.shutdown() stop_rawserver = lambda *a : rawserver.stop() df.addCallbacks(stop_rawserver, stop_rawserver) else: rawserver.stop()
|
FILE.write(self.d.shareRating+"\n")
|
FILE.write(self.shareRating+"\n")
|
def shutdown(): print "shutdown." self.d.display({'activity':_("shutting down"), 'fractionDone':0}) if self.multitorrent: df = self.multitorrent.shutdown() stop_rawserver = lambda *a : rawserver.stop() df.addCallbacks(stop_rawserver, stop_rawserver) else: rawserver.stop()
|