rem (stringlengths 0-322k) | add (stringlengths 0-2.05M) | context (stringlengths 8-228k)
---|---|---
if i < first: continue
|
if i < first:
    i = i+1
    continue
|
def undoLog(self, first, last, filter=None):
    self._lock_acquire()
    try:
        transactions=self._data.items()
        pos=len(transactions)
        encode=base64.encodestring
        r=[]
        append=r.append
        i=0
        while i < last and pos:
            pos=pos-1
            if i < first: continue
            tid, (p, u, d, e, t) = transactions[pos]
            if p: continue
            d={'id': encode(tid)[:-1],
               'time': TimeStamp(tid).timeTime(),
               'user_name': u, 'description': d}
            if e: d.update(loads(e))
|
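The fix in this row adds the missing counter increment: in the removed version, entries skipped by the `first` filter never advance `i`, so the requested `first`/`last` window is miscounted. A minimal standalone sketch of the corrected loop shape (illustrative code, not the dataset's):

```python
def window(items, first, last):
    """Return up to last-first items, skipping the first `first`, newest first."""
    i = 0
    pos = len(items)
    out = []
    while i < last and pos:
        pos -= 1
        if i < first:
            i += 1      # without this increment the window is miscounted
            continue
        out.append(items[pos])
        i += 1
    return out

assert window(list('abcdef'), 1, 3) == ['e', 'd']
```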
if index[oid]==srcpos:
|
if index_get(oid, None) == srcpos:
|
def commitVersion(self, src, dest, transaction, abort=None):
    # We are going to commit by simply storing back pointers.
|
p = self.klass()
|
p = P()
|
def testInterface(self):
    self.assert_(IPersistent.isImplementedByInstancesOf(Persistent),
                 "%s does not implement IPersistent" % Persistent)
    p = Persistent()
    self.assert_(IPersistent.isImplementedBy(p),
                 "%s does not implement IPersistent" % p)
|
invalid=self._invalid
|
def commit(self, object, transaction):
    oid=object._p_oid
    if oid is None or object._p_jar is not self:
        oid = self.new_oid()
        object._p_jar=self
        object._p_oid=oid
|
|
if read(4) == '<?xm':
|
magic=read(4)
if magic == '<?xm':
|
def importFile(self, file, clue=''):
    # This is tricky, because we need to work in a transaction!
|
else:
    file.seek(0)
    if file.read(4) != 'ZEXP':
        raise POSException.ExportError, 'Invalid export header'
|
if magic != 'ZEXP': raise POSException.ExportError, 'Invalid export header'
|
def importFile(self, file, clue=''):
    # This is tricky, because we need to work in a transaction!
|
self.t = None
|
def tearDown(self):
    self.t = None
    del self.t
|
|
def _getRoot(self):
|
        if self.storage is not None:
            self.storage.close()
            self.storage.cleanup()

    def openDB(self):
|
def tearDown(self):
    self.t = None
    del self.t
|
        s = FileStorage(n)
        db = DB(s)
        root = db.open().root()
        return root

    def _closeDB(self, root):
        root._p_jar._db.close()
        root = None

    def _delDB(self):
        os.system('rm fs_tmp__*')
|
self.storage = FileStorage(n)
self.db = DB(self.storage)
|
def _getRoot(self):
    from ZODB.FileStorage import FileStorage
    from ZODB.DB import DB
    n = 'fs_tmp__%s' % os.getpid()
    s = FileStorage(n)
    db = DB(s)
    root = db.open().root()
    return root
|
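For orientation, the context cell above is the classic FileStorage open-a-root pattern. A minimal sketch of the same flow against current ZODB, where the `transaction` package replaces the old `get_transaction()` builtin (file name illustrative):

```python
from ZODB.FileStorage import FileStorage
from ZODB.DB import DB
import transaction

storage = FileStorage('data_tmp.fs')   # illustrative file name
db = DB(storage)
conn = db.open()
root = conn.root()
root['answer'] = 42                    # any picklable object
transaction.commit()                   # replaces get_transaction().commit()
conn.close()
db.close()
```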
class NormalSetTests(Base):
    """ Test common to all set types """

class ExtendedSetTests(NormalSetTests):
|
class SetTests(Base):
|
def testFailMergeInsert(self):
    base, b1, b2, bm, e1, e2, items = self._setupConflict()
    b1[-99999]=-99999
    b1[e1[0][0]]=e1[0][1]
    b2[99999]=99999
    b2[e1[0][0]]=e1[0][1]
    test_merge(base, b1, b2, bm, 'merge conflicting inserts', should_fail=1)
|
b1=base.__class__(base)
b2=base.__class__(base)
bm=base.__class__(base)
items=base.keys()
|
b1 = base.__class__(base.keys())
b2 = base.__class__(base.keys())
bm = base.__class__(base.keys())
items = base.keys()
|
def _setupConflict(self):
    l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686,
        -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067]
|
except (ConflictError, ValueError), err: pass
|
except ConflictError, err: pass
|
def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0):
    s1=o1.__getstate__()
    s2=o2.__getstate__()
    s3=o3.__getstate__()
    expected=expect.__getstate__()
    if expected is None: expected=((((),),),)
    if should_fail:
        try:
            merged=o1._p_resolveConflict(s1, s2, s3)
        except (ConflictError, ValueError), err:
            pass # ConflictError is the only exception that should occur
        else:
            assert 0, message
    else:
        merged=o1._p_resolveConflict(s1, s2, s3)
        assert merged==expected, message
|
class TestIOSets(ExtendedSetTests, TestCase):
|
class TestIOSets(SetTests, TestCase):
|
def setUp(self): self.t = IIBTree()
|
class TestOOSets(ExtendedSetTests, TestCase):
|
class TestOOSets(SetTests, TestCase):
|
def setUp(self): self.t = IOSet()
|
class TestIISets(ExtendedSetTests, TestCase):
|
class TestIISets(SetTests, TestCase):
|
def setUp(self): self.t = OOSet()
|
class TestOISets(ExtendedSetTests, TestCase):
|
class TestOISets(SetTests, TestCase):
|
def setUp(self): self.t = IISet()
|
class TestIOTreeSets(NormalSetTests, TestCase):
|
class TestIOTreeSets(SetTests, TestCase):
|
def setUp(self): self.t = OISet()
|
class TestOOTreeSets(NormalSetTests, TestCase):
|
class TestOOTreeSets(SetTests, TestCase):
|
def setUp(self): self.t = IOTreeSet()
|
class TestIITreeSets(NormalSetTests, TestCase):
|
class TestIITreeSets(SetTests, TestCase):
|
def setUp(self): self.t = OOTreeSet()
|
class TestOITreeSets(NormalSetTests, TestCase):
|
class TestOITreeSets(SetTests, TestCase):
|
def setUp(self): self.t = IITreeSet()
|
class NastyConfict(Base, TestCase):

    def setUp(self):
        self.t = OOBTree()

    def testResolutionBlowsUp(self):
        b = self.t
        for i in range(0, 200, 4):
            b[i] = i
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        self.openDB()
        r1 = self.db.open().root()
        r1["t"] = self.t
        get_transaction().commit()
        r2 = self.db.open().root()
        copy = r2["t"]
        list(copy.values())
        self.assertEqual(self.t._p_serial, copy._p_serial)
        self.t.update({1:2, 2:3})
        get_transaction().commit()
        copy.update({3:4})
        get_transaction().commit()
        list(copy.values())

    def testBucketSplitConflict(self):
        b = self.t
        for i in range(0, 200, 4):
            b[i] = i
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        self.openDB()
        r1 = self.db.open().root()
        r1["t"] = self.t
        get_transaction().commit()
        r2 = self.db.open().root()
        copy = r2["t"]
        list(copy.values())
        self.assertEqual(self.t._p_serial, copy._p_serial)
        b = self.t
        numtoadd = 16
        candidate = 60
        while numtoadd:
            if not b.has_key(candidate):
                b[candidate] = candidate
                numtoadd -= 1
            candidate += 1
        state = b.__getstate__()
        self.assertEqual(len(state) , 2)
        self.assertEqual(len(state[0]), 7)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 75)
        self.assertEqual(state[0][5], 120)
        get_transaction().commit()
        b = copy
        for i in range(112, 116):
            b[i] = i
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        self.assertRaises(ConflictError, get_transaction().commit)
        get_transaction().abort()

    def testEmptyBucketConflict(self):
        b = self.t
        for i in range(0, 200, 4):
            b[i] = i
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        self.openDB()
        r1 = self.db.open().root()
        r1["t"] = self.t
        get_transaction().commit()
        r2 = self.db.open().root()
        copy = r2["t"]
        list(copy.values())
        self.assertEqual(self.t._p_serial, copy._p_serial)
        b = self.t
        for k in 60, 64, 68, 72, 76, 80, 84, 88:
            del b[k]
        state = b.__getstate__()
        self.assertEqual(len(state) , 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        get_transaction().commit()
        b = copy
        for k in 92, 96, 100, 104, 108, 112, 116:
            del b[k]
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        get_transaction().commit()
        self.assertRaises(AssertionError, b._check)

    def testEmptyBucketNoConflict(self):
        b = self.t
        for i in range(0, 200, 4):
            b[i] = i
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        self.openDB()
        r1 = self.db.open().root()
        r1["t"] = self.t
        get_transaction().commit()
        r2 = self.db.open().root()
        copy = r2["t"]
        list(copy.values())
        self.assertEqual(self.t._p_serial, copy._p_serial)
        b = self.t
        b[1] = 1
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 5)
        self.assertEqual(state[0][1], 60)
        self.assertEqual(state[0][3], 120)
        get_transaction().commit()
        b = copy
        for k in range(120, 200, 4):
            del b[k]
        state = b.__getstate__()
        self.assertEqual(len(state), 2)
        self.assertEqual(len(state[0]), 3)
        self.assertEqual(state[0][1], 60)
        get_transaction().commit()
        b._check()
|
def setUp(self): self.t = OIBucket()
|
|
    TIOBTree = makeSuite(TestIOBTrees, 'test')
    TOOBTree = makeSuite(TestOOBTrees, 'test')
    TOIBTree = makeSuite(TestOIBTrees, 'test')
    TIIBTree = makeSuite(TestIIBTrees, 'test')

    TIOSet = makeSuite(TestIOSets, 'test')
    TOOSet = makeSuite(TestOOSets, 'test')
    TOISet = makeSuite(TestIOSets, 'test')
    TIISet = makeSuite(TestOOSets, 'test')

    TIOTreeSet = makeSuite(TestIOTreeSets, 'test')
    TOOTreeSet = makeSuite(TestOOTreeSets, 'test')
    TOITreeSet = makeSuite(TestIOTreeSets, 'test')
    TIITreeSet = makeSuite(TestOOTreeSets, 'test')

    TIOBucket = makeSuite(TestIOBuckets, 'test')
    TOOBucket = makeSuite(TestOOBuckets, 'test')
    TOIBucket = makeSuite(TestOIBuckets, 'test')
    TIIBucket = makeSuite(TestIIBuckets, 'test')

    alltests = TestSuite((TIOSet, TOOSet, TOISet, TIISet,
                          TIOTreeSet, TOOTreeSet, TOITreeSet, TIITreeSet,
                          TIOBucket, TOOBucket, TOIBucket, TIIBucket,
                          TOOBTree, TIOBTree, TOIBTree, TIIBTree))
    return alltests

def lsubtract(l1, l2):
    l1=list(l1)
    l2=list(l2)
    l = filter(lambda x, l1=l1: x not in l1, l2)
    l = l + filter(lambda x, l2=l2: x not in l2, l1)
    return l

def realseq(itemsob):
    return map(lambda x: x, itemsob)

def main():
    TextTestRunner().run(test_suite())

if __name__ == '__main__':
    main()
|
    suite = TestSuite()
    for k in (TestIOBTrees, TestOOBTrees, TestOIBTrees, TestIIBTrees,
              TestIOSets, TestOOSets, TestOISets, TestIISets,
              TestIOTreeSets, TestOOTreeSets, TestOITreeSets, TestIITreeSets,
              TestIOBuckets, TestOOBuckets, TestOIBuckets, TestIIBuckets,
              NastyConfict):
        suite.addTest(makeSuite(k))
    return suite
|
def test_suite():
    TIOBTree = makeSuite(TestIOBTrees, 'test')
    TOOBTree = makeSuite(TestOOBTrees, 'test')
    TOIBTree = makeSuite(TestOIBTrees, 'test')
    TIIBTree = makeSuite(TestIIBTrees, 'test')

    TIOSet = makeSuite(TestIOSets, 'test')
    TOOSet = makeSuite(TestOOSets, 'test')
    TOISet = makeSuite(TestIOSets, 'test')
    TIISet = makeSuite(TestOOSets, 'test')

    TIOTreeSet = makeSuite(TestIOTreeSets, 'test')
    TOOTreeSet = makeSuite(TestOOTreeSets, 'test')
    TOITreeSet = makeSuite(TestIOTreeSets, 'test')
    TIITreeSet = makeSuite(TestOOTreeSets, 'test')

    TIOBucket = makeSuite(TestIOBuckets, 'test')
    TOOBucket = makeSuite(TestOOBuckets, 'test')
    TOIBucket = makeSuite(TestOIBuckets, 'test')
    TIIBucket = makeSuite(TestIIBuckets, 'test')

    alltests = TestSuite((TIOSet, TOOSet, TOISet, TIISet,
                          TIOTreeSet, TOOTreeSet, TOITreeSet, TIITreeSet,
                          TIOBucket, TOOBucket, TOIBucket, TIIBucket,
                          TOOBTree, TIOBTree, TOIBTree, TIIBTree))
    return alltests
|
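The `add` cell above collapses sixteen hand-written `makeSuite` calls into a loop, which also removes the copy-paste bugs visible in the `rem` cell (`TOISet = makeSuite(TestIOSets, ...)` and friends name the wrong classes). Roughly the same shape in current unittest, where `makeSuite` is deprecated in favor of `TestLoader`; the test classes here are stand-ins:

```python
import unittest

class TestA(unittest.TestCase):          # stand-in test cases
    def test_one(self): pass

class TestB(unittest.TestCase):
    def test_one(self): pass

def test_suite():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for klass in (TestA, TestB):         # extend the tuple as classes are added
        suite.addTest(loader.loadTestsFromTestCase(klass))
    return suite

if __name__ == '__main__':
    unittest.TextTestRunner().run(test_suite())
```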
try:
    get_transaction().register(self)
    self._p_changed=1
|
try: get_transaction().register(self)
|
def __changed__(self,v=-1):
    old=self._p_changed
    if v != -1:
        if v and not old and self._p_jar is not None:
            try:
                get_transaction().register(self)
                self._p_changed=1
            except: pass
|
def open(self, read_only=0):
    addr = self._storage._addr
    self._storage.close()
    self._storage = ClientStorage(addr, read_only=read_only, wait=1)

def checkWriteMethods(self):
    if hasattr(ZODB, "__version__"):
        ReadOnlyStorage.ReadOnlyStorage.checkWriteMethods(self)

class FileStorageTests(GenericTests):
    """Test ZEO backed by a FileStorage."""
    level = 2
|
def open(self, read_only=0):
    # XXX Needed to support ReadOnlyStorage tests. Ought to be a
    # cleaner way.
    addr = self._storage._addr
    self._storage.close()
    self._storage = ClientStorage(addr, read_only=read_only, wait=1)
|
|
def getStorage(self):
|
def getConfig(self):
|
def getStorage(self):
    self._envdir = tempfile.mktemp()
    return """\
<Storage>
    type BDBFullStorage
    name %s
</Storage>
""" % self._envdir
|
def getStorage(self):
|
def getConfig(self):
|
def getStorage(self):
    self._envdir = tempfile.mktemp()
    return """\
<Storage>
    type MappingStorage
    name %s
</Storage>
""" % self._envdir
|
def f(c):
    o = c._opened
    d = c._debug_info
    if d:
        if len(d) == 1: d = d[0]
    else: d = ''
    d = "%s (%s)" % (d, len(c._cache))
    result.append({
        'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
        'info': d,
        'version': version,
        })

self._connectionMap(f)
|
for version, pool in self._pools.items():
    for c in pool.all_as_list():
        o = c._opened
        d = c._debug_info
        if d:
            if len(d) == 1: d = d[0]
        else: d = ''
        d = "%s (%s)" % (d, len(c._cache))
        result.append({
            'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
            'info': d,
            'version': version,
            })
|
def f(c):
    o = c._opened
    d = c._debug_info
    if d:
        if len(d) == 1: d = d[0]
    else: d = ''
    d = "%s (%s)" % (d, len(c._cache))
|
commitVersion=abortVersion
|
def close(self): pass
|
|
raise UndoError, 'non-undoable transaction'
|
raise POSException.UndoError, 'non-undoable transaction'
|
def undo(self, transaction_id): raise UndoError, 'non-undoable transaction'
|
level=zLOG.BLATHER)
|
level=zLOG.DEBUG)
|
def handle_request(self, msgid, flags, name, args):
    if not self.check_method(name):
        msg = "Invalid method name: %s on %s" % (name, repr(self.obj))
        raise ZRPCError(msg)
    if __debug__:
        self.log("calling %s%s" % (name, short_repr(args)),
                 level=zLOG.BLATHER)
|
(msgid, short_repr(reply)), level=zLOG.DEBUG)
|
(msgid, short_repr(reply)), level=zLOG.TRACE)
|
def wait(self, msgid):
    """Invoke asyncore mainloop and wait for reply."""
    if __debug__:
        self.log("wait(%d), async=%d" % (msgid, self.is_async()),
                 level=zLOG.TRACE)
    if self.is_async():
        self._pull_trigger()
|
zLOG.LOG("winserver", zLOG.BLATHER, "map: %r" % asyncore.socket_map)
|
zLOG.LOG(label, zLOG.DEBUG, "map: %r" % asyncore.socket_map)
|
def main(port, storage_name, rawargs):
    klass = load_storage_class(storage_name)
    args = []
    for arg in rawargs:
        if arg.startswith('='):
            arg = eval(arg[1:], {'__builtins__': {}})
        args.append(arg)
    storage = klass(*args)
    zeo_port = int(port)
    test_port = zeo_port + 1
    t = ZEOTestServer(('', test_port), storage)
    serv = ZEO.StorageServer.StorageServer(('', zeo_port), {'1': storage})
    import zLOG
    while asyncore.socket_map:
        zLOG.LOG("winserver", zLOG.BLATHER,
                 "map: %r" % asyncore.socket_map)
        asyncore.poll(30.0)
|
tsize = 0
|
def fsdump(path, file=None, with_offset=1):
    i = 0
    iter = FileIterator(path)
    for trans in iter:
        if with_offset:
            print >> file, ("Trans #%05d tid=%016x time=%s size=%d"
                  % (i, u64(trans.tid), str(TimeStamp(trans.tid)),
                     trans._tend - trans._tpos))
        else:
            print >> file, "Trans #%05d tid=%016x time=%s" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)))
        print >> file, "\toffset=%d status=%s user=%s description=%s" % \
              (trans._tpos, `trans.status`, trans.user, trans.description)
        j = 0
        tsize = 0
        for rec in trans:
            if rec.data is None:
                fullclass = "undo or abort of object creation"
            else:
                modname, classname = get_pickle_metadata(rec.data)
                dig = md5.new(rec.data).hexdigest()
                fullclass = "%s.%s" % (modname, classname)
            # special case for testing purposes
            if fullclass == "ZODB.tests.MinPO.MinPO":
                obj = zodb_unpickle(rec.data)
                fullclass = "%s %s" % (fullclass, obj.value)
            if rec.version:
                version = "version=%s " % rec.version
            else:
                version = ''
            if rec.data_txn:
                # XXX It would be nice to print the transaction number
                # (i) but it would be too expensive to keep track of.
                bp = "bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""
            if rec.data_txn:
                size = 8 + len(rec.version)
            else:
                if rec.data is None:
                    # XXX why is rec.data None and rec.data_txn False?
                    size = len(rec.version)
                else:
                    size = len(rec.data) + len(rec.version)
            if rec.version:
                size += DATA_VERSION_HDR_LEN
            else:
                size += DATA_HDR_LEN
            tsize += size
            print >> file, " data #%05d oid=%016x %sclass=%s size=%d %s" % \
                  (j, u64(rec.oid), version, fullclass, size, bp)
            j += 1
        print >> file
        i += 1
    iter.close()
|
|
tsize += size
|
def fsdump(path, file=None, with_offset=1):
    i = 0
    iter = FileIterator(path)
    for trans in iter:
        if with_offset:
            print >> file, ("Trans #%05d tid=%016x time=%s size=%d"
                  % (i, u64(trans.tid), str(TimeStamp(trans.tid)),
                     trans._tend - trans._tpos))
        else:
            print >> file, "Trans #%05d tid=%016x time=%s" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)))
        print >> file, "\toffset=%d status=%s user=%s description=%s" % \
              (trans._tpos, `trans.status`, trans.user, trans.description)
        j = 0
        tsize = 0
        for rec in trans:
            if rec.data is None:
                fullclass = "undo or abort of object creation"
            else:
                modname, classname = get_pickle_metadata(rec.data)
                dig = md5.new(rec.data).hexdigest()
                fullclass = "%s.%s" % (modname, classname)
            # special case for testing purposes
            if fullclass == "ZODB.tests.MinPO.MinPO":
                obj = zodb_unpickle(rec.data)
                fullclass = "%s %s" % (fullclass, obj.value)
            if rec.version:
                version = "version=%s " % rec.version
            else:
                version = ''
            if rec.data_txn:
                # XXX It would be nice to print the transaction number
                # (i) but it would be too expensive to keep track of.
                bp = "bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""
            if rec.data_txn:
                size = 8 + len(rec.version)
            else:
                if rec.data is None:
                    # XXX why is rec.data None and rec.data_txn False?
                    size = len(rec.version)
                else:
                    size = len(rec.data) + len(rec.version)
            if rec.version:
                size += DATA_VERSION_HDR_LEN
            else:
                size += DATA_HDR_LEN
            tsize += size
            print >> file, " data #%05d oid=%016x %sclass=%s size=%d %s" % \
                  (j, u64(rec.oid), version, fullclass, size, bp)
            j += 1
        print >> file
        i += 1
    iter.close()
|
|
try:
    xfsz = signal.SIFXFSZ
except AttributeError:
    pass
else:
    signal.signal(xfsz, signal.SIG_IGN)
signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
try:
|
if hasattr(signal, 'SIGXFSZ'):
    signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
if hasattr(signal, 'SIGTERM'):
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
if hasattr(signal, 'SIGHUP'):
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
if hasattr(signal, 'SIGUSR2'):
|
def setup_signals(storages):
    try:
        import signal
    except ImportError:
        return
    try:
        xfsz = signal.SIFXFSZ
    except AttributeError:
        pass
    else:
        signal.signal(xfsz, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
    try:
        signal.signal(signal.SIGUSR2, rotate_logs_handler)
    except:
        pass
|
except: pass
|
def setup_signals(storages):
    try:
        import signal
    except ImportError:
        return
    try:
        xfsz = signal.SIFXFSZ
    except AttributeError:
        pass
    else:
        signal.signal(xfsz, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
    try:
        signal.signal(signal.SIGUSR2, rotate_logs_handler)
    except:
        pass
|
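This pair of rows replaces a lookup of the misspelled `signal.SIFXFSZ` (whose failure the try/except silently swallowed) with `hasattr` guards on the correctly spelled names. The guarded-registration pattern in a minimal modern sketch; the `shutdown` handler is a stand-in:

```python
import signal

def shutdown(signum, frame):             # stand-in handler
    print("shutting down on signal", signum)

# Not every platform defines every signal (SIGXFSZ is POSIX-only, for
# example), so guard each registration instead of catching AttributeError.
if hasattr(signal, 'SIGXFSZ'):
    signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
if hasattr(signal, 'SIGTERM'):
    signal.signal(signal.SIGTERM, shutdown)
```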
|
import signal
signal.signal(signal.SIGHUP, rotate_logs_handler)
|
def rotate_logs_handler(signum, frame):
    rotate_logs()
    import signal
    signal.signal(signal.SIGHUP, rotate_logs_handler)
|
|
h=v/t32
v=v%t32
|
h, v = divmod(v, t32)
|
def p64(v, pack=struct.pack):
    """Pack an integer or long into a 8-byte string"""
    if v < t32:
        h=0
    else:
        h=v/t32
        v=v%t32
    return pack(">II", h, v)
|
if h < 0: h=t32+h
v=h*t32+v
|
if h < 0:
    h = t32 + h
v = (h << 32) + v
|
def u64(v, unpack=struct.unpack):
    """Unpack an 8-byte string into a 64-bit (or long) integer"""
    h, v = unpack(">ii", v)
    if v < 0:
        v=t32+v
    if h:
        if h < 0:
            h=t32+h
        v=h*t32+v
    return v
|
v=h*t32+v
|
v = (h << 32) + v
|
def U64(v, unpack=struct.unpack):
    """Same as u64 but always returns a long."""
    h, v = unpack(">II", v)
    if h:
        v=h*t32+v
    return v
|
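These `p64`/`u64`/`U64` rows juggle two 32-bit halves because the code predates reliable 64-bit struct codes. For comparison, a sketch that round-trips the same 8-byte big-endian representation with a single `">Q"` format (an illustration, not the dataset's code):

```python
import struct

def p64(v):
    """Pack a non-negative int into an 8-byte big-endian string."""
    return struct.pack(">Q", v)

def u64(s):
    """Unpack an 8-byte big-endian string into an int."""
    return struct.unpack(">Q", s)[0]

assert u64(p64(2**40 + 7)) == 2**40 + 7
```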
def log(msg, level=zLOG.INFO): zLOG.LOG("ZEC", level, msg)
|
def log(msg, level=zLOG.INFO): zLOG.LOG("ZEC", level, msg)
|
|
log("%s: storage=%r, size=%r; file[%r]=%r" %
    (self.__class__.__name__, storage, size, current, p[current]))
self._limit = size / 2
|
self.log("%s: storage=%r, size=%r; file[%r]=%r" % (self.__class__.__name__, storage, size, current, p[current]))
|
def __init__(self, storage='1', size=20000000, client=None, var=None):
    # Arguments:
    # storage -- storage name (used in persistent cache file names only)
    # size -- size limit in bytes of both files together
    # client -- if not None, use a persistent cache file and use this name
    # var -- directory where to create persistent cache files
|
read_index(index, serial, f[not current], not current)
self._pos = read_index(index, serial, f[current], current)
|
self.read_index(serial, not current)
self._pos = self.read_index(serial, current)
|
def open(self):
    # Two tasks:
    # - Set self._index, self._get, and self._pos.
    # - Read and validate both cache files, returning a list of
    #   serials to be used by verify().
    # This may be called more than once (by the cache verification code).
    self._acquire()
    try:
        self._index = index = {}
        self._get = index.get
        serial = {}
        f = self._f
        current = self._current
        if f[not current] is not None:
            read_index(index, serial, f[not current], not current)
        self._pos = read_index(index, serial, f[current], current)
|
log("invalidate: short record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
self.log("invalidate: short record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
def invalidate(self, oid, version):
    self._acquire()
    try:
        p = self._get(oid, None)
        if p is None:
            self._trace(0x10, oid, version)
            return None
        f = self._f[p < 0]
        ap = abs(p)
        f.seek(ap)
        h = f.read(27)
        if len(h) != 27:
            log("invalidate: short record for oid %16x "
                "at position %d in cache file %d"
                % (U64(oid), ap, p < 0))
            del self._index[oid]
            return None
        if h[:8] != oid:
            log("invalidate: oid mismatch: expected %16x read %16x "
                "at position %d in cache file %d"
                % (U64(oid), U64(h[:8]), ap, p < 0))
            del self._index[oid]
            return None
        f.seek(ap+8) # Switch from reading to writing
        if version and h[15:19] != '\0\0\0\0':
            self._trace(0x1A, oid, version)
            # There's still relevant non-version data in the cache record
            f.write('n')
        else:
            self._trace(0x1C, oid, version)
            del self._index[oid]
            f.write('i')
    finally:
        self._release()
|
log("invalidate: oid mismatch: expected %16x read %16x " "at position %d in cache file %d" % (U64(oid), U64(h[:8]), ap, p < 0))
|
self.log("invalidate: oid mismatch: expected %16x read %16x " "at position %d in cache file %d" % (U64(oid), U64(h[:8]), ap, p < 0))
|
def invalidate(self, oid, version):
    self._acquire()
    try:
        p = self._get(oid, None)
        if p is None:
            self._trace(0x10, oid, version)
            return None
        f = self._f[p < 0]
        ap = abs(p)
        f.seek(ap)
        h = f.read(27)
        if len(h) != 27:
            log("invalidate: short record for oid %16x "
                "at position %d in cache file %d"
                % (U64(oid), ap, p < 0))
            del self._index[oid]
            return None
        if h[:8] != oid:
            log("invalidate: oid mismatch: expected %16x read %16x "
                "at position %d in cache file %d"
                % (U64(oid), U64(h[:8]), ap, p < 0))
            del self._index[oid]
            return None
        f.seek(ap+8) # Switch from reading to writing
        if version and h[15:19] != '\0\0\0\0':
            self._trace(0x1A, oid, version)
            # There's still relevant non-version data in the cache record
            f.write('n')
        else:
            self._trace(0x1C, oid, version)
            del self._index[oid]
            f.write('i')
    finally:
        self._release()
|
log("load: bad record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
self.log("load: bad record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
def load(self, oid, version): self._acquire() try: p = self._get(oid, None) if p is None: self._trace(0x20, oid, version) return None f = self._f[p < 0] ap = abs(p) seek = f.seek read = f.read seek(ap) h = read(27) if len(h)==27 and h[8] in 'nv' and h[:8]==oid: tlen, vlen, dlen = unpack(">iHi", h[9:19]) else: tlen = -1 if tlen <= 0 or vlen < 0 or dlen < 0 or vlen+dlen > tlen: log("load: bad record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0)) del self._index[oid] return None
|
log("modifiedInVersion: bad record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
self.log("modifiedInVersion: bad record for oid %16x " "at position %d in cache file %d" % (U64(oid), ap, p < 0))
|
def modifiedInVersion(self, oid):
    # This should return:
    # - The version from the record for oid, if there is one.
    # - '', if there is no version in the record and its status is 'v'.
    # - None, if we don't know: no valid record or status is 'n'.
    self._acquire()
    try:
        p = self._get(oid, None)
        if p is None:
            self._trace(0x40, oid)
            return None
        f = self._f[p < 0]
        ap = abs(p)
        seek = f.seek
        read = f.read
        seek(ap)
        h = read(27)
        if len(h)==27 and h[8] in 'nv' and h[:8]==oid:
            tlen, vlen, dlen = unpack(">iHi", h[9:19])
        else:
            tlen = -1
        if tlen <= 0 or vlen < 0 or dlen < 0 or vlen+dlen > tlen:
            log("modifiedInVersion: bad record for oid %16x "
                "at position %d in cache file %d"
                % (U64(oid), ap, p < 0))
            del self._index[oid]
            return None
|
log("flipping cache files. new current = %d" % current)
|
self.log("flipping cache files. new current = %d" % current)
|
def checkSize(self, size):
    # Make sure we aren't going to exceed the target size.
    # If we are, then flip the cache.
    self._acquire()
    try:
        if self._pos + size > self._limit:
            current = not self._current
            self._current = current
            self._trace(0x70)
            log("flipping cache files. new current = %d" % current)
            # Delete the half of the index that's no longer valid
            index = self._index
            for oid in index.keys():
                if (index[oid] < 0) == current:
                    del index[oid]
            if self._p[current] is not None:
                # Persistent cache file: remove the old file
                # before opening the new one, because the old file
                # may be owned by root (created before setuid()).
                if self._f[current] is not None:
                    self._f[current].close()
                try:
                    os.remove(self._p[current])
                except:
                    pass
                self._f[current] = open(self._p[current],'w+b')
            else:
                # Temporary cache file:
                self._f[current] = tempfile.TemporaryFile(suffix='.zec')
            self._f[current].write(magic)
            self._pos = 4
    finally:
        self._release()
|
log("cannot write tracefile %s (%s)" % (tfn, msg))
else:
    log("opened tracefile %s" % tfn)
|
self.log("cannot write tracefile %s (%s)" % (tfn, msg))
else:
    self.log("opened tracefile %s" % tfn)
|
def _setup_trace(self):
    # See if cache tracing is requested through $ZEO_CACHE_TRACE.
    # If not, or if we can't write to the trace file,
    # disable tracing by setting self._trace to a dummy function.
    self._tracefile = None
    tfn = os.environ.get("ZEO_CACHE_TRACE")
    if tfn:
        try:
            self._tracefile = open(tfn, "ab")
            self._trace(0x00)
        except IOError, msg:
            self._tracefile = None
            log("cannot write tracefile %s (%s)" % (tfn, msg))
        else:
            log("opened tracefile %s" % tfn)
    if self._tracefile is None:
        def notrace(*args):
            pass
        self._trace = notrace
|
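The `_setup_trace` row shows a handy pattern: optional diagnostics switched on by an environment variable, degrading to a no-op when the file can't be opened. A self-contained sketch of the same idea (the variable name `ZEO_CACHE_TRACE` comes from the row; the rest is illustrative):

```python
import os

def make_tracer(env_var="ZEO_CACHE_TRACE"):
    """Return a trace function; a no-op unless env_var names a writable file."""
    path = os.environ.get(env_var)
    if path:
        try:
            f = open(path, "ab")
        except IOError:
            pass
        else:
            def trace(code):
                f.write(b"%d\n" % code)   # record the event code
            return trace
    return lambda code: None              # tracing disabled

trace = make_tracer()
trace(0x00)  # cheap to call whether or not tracing is enabled
```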
def read_index(index, serial, f, fileindex):
    seek = f.seek
    read = f.read
    pos = 4
    count = 0
    while 1:
|
def read_index(self, serial, fileindex):
    index = self._index
    f = self._f[fileindex]
    seek = f.seek
    read = f.read
    pos = 4
    count = 0
    while 1:
        f.seek(pos)
        h = read(27)
        if len(h) != 27:
            if h:
                self.rilog("truncated header", pos, fileindex)
            break
        if h[8] in 'vni':
            tlen, vlen, dlen = unpack(">iHi", h[9:19])
        else:
            tlen = -1
        if tlen <= 0 or vlen < 0 or dlen < 0 or vlen + dlen > tlen:
            self.rilog("invalid header data", pos, fileindex)
            break
        oid = h[:8]
        if h[8] == 'v' and vlen:
            seek(dlen+vlen, 1)
            vdlen = read(4)
            if len(vdlen) != 4:
                self.rilog("truncated record", pos, fileindex)
                break
            vdlen = unpack(">i", vdlen)[0]
            if vlen+dlen+43+vdlen != tlen:
                self.rilog("inconsistent lengths", pos, fileindex)
                break
            seek(vdlen, 1)
            vs = read(8)
            if read(4) != h[9:13]:
                self.rilog("inconsistent tlen", pos, fileindex)
                break
        else:
            if h[8] in 'vn' and vlen == 0:
                if dlen+31 != tlen:
                    self.rilog("inconsistent nv lengths", pos, fileindex)
                seek(dlen, 1)
                if read(4) != h[9:13]:
                    self.rilog("inconsistent nv tlen", pos, fileindex)
                    break
            vs = None
        if h[8] in 'vn':
            if fileindex:
                index[oid] = -pos
            else:
                index[oid] = pos
            serial[oid] = h[-8:], vs
        else:
            if serial.has_key(oid):
                del serial[oid]
                del index[oid]
        pos = pos + tlen
        count += 1
|
def read_index(index, serial, f, fileindex):
    seek = f.seek
    read = f.read
    pos = 4
    count = 0
    while 1:
        f.seek(pos)
        h = read(27)
        if len(h) != 27:
            # An empty read is expected, anything else is suspect
            if h:
                rilog("truncated header", pos, fileindex)
            break
        if h[8] in 'vni':
            tlen, vlen, dlen = unpack(">iHi", h[9:19])
        else:
            tlen = -1
        if tlen <= 0 or vlen < 0 or dlen < 0 or vlen + dlen > tlen:
            rilog("invalid header data", pos, fileindex)
            break
        oid = h[:8]
        if h[8] == 'v' and vlen:
            seek(dlen+vlen, 1)
            vdlen = read(4)
            if len(vdlen) != 4:
                rilog("truncated record", pos, fileindex)
                break
            vdlen = unpack(">i", vdlen)[0]
            if vlen+dlen+43+vdlen != tlen:
                rilog("inconsistent lengths", pos, fileindex)
                break
            seek(vdlen, 1)
            vs = read(8)
            if read(4) != h[9:13]:
                rilog("inconsistent tlen", pos, fileindex)
                break
        else:
            if h[8] in 'vn' and vlen == 0:
                if dlen+31 != tlen:
                    rilog("inconsistent nv lengths", pos, fileindex)
                seek(dlen, 1)
                if read(4) != h[9:13]:
                    rilog("inconsistent nv tlen", pos, fileindex)
                    break
            vs = None
        if h[8] in 'vn':
            if fileindex:
                index[oid] = -pos
            else:
                index[oid] = pos
            serial[oid] = h[-8:], vs
        else:
            if serial.has_key(oid):
                # We have a record for this oid, but it was invalidated!
                del serial[oid]
                del index[oid]
        pos = pos + tlen
        count += 1
    f.seek(pos)
    try:
        f.truncate()
    except:
        pass
    if count:
        log("read_index: cache file %d has %d records and %d bytes"
            % (fileindex, count, pos))
    return pos
|
        h = read(27)
        if len(h) != 27:
            if h:
                rilog("truncated header", pos, fileindex)
            break
        if h[8] in 'vni':
            tlen, vlen, dlen = unpack(">iHi", h[9:19])
        else:
            tlen = -1
        if tlen <= 0 or vlen < 0 or dlen < 0 or vlen + dlen > tlen:
            rilog("invalid header data", pos, fileindex)
            break
        oid = h[:8]
        if h[8] == 'v' and vlen:
            seek(dlen+vlen, 1)
            vdlen = read(4)
            if len(vdlen) != 4:
                rilog("truncated record", pos, fileindex)
                break
            vdlen = unpack(">i", vdlen)[0]
            if vlen+dlen+43+vdlen != tlen:
                rilog("inconsistent lengths", pos, fileindex)
                break
            seek(vdlen, 1)
            vs = read(8)
            if read(4) != h[9:13]:
                rilog("inconsistent tlen", pos, fileindex)
                break
        else:
            if h[8] in 'vn' and vlen == 0:
                if dlen+31 != tlen:
                    rilog("inconsistent nv lengths", pos, fileindex)
                seek(dlen, 1)
                if read(4) != h[9:13]:
                    rilog("inconsistent nv tlen", pos, fileindex)
                    break
            vs = None
        if h[8] in 'vn':
            if fileindex:
                index[oid] = -pos
            else:
                index[oid] = pos
            serial[oid] = h[-8:], vs
        else:
            if serial.has_key(oid):
                del serial[oid]
                del index[oid]
        pos = pos + tlen
        count += 1
    f.seek(pos)
    try:
        f.truncate()
    except:
        pass
    if count:
        log("read_index: cache file %d has %d records and %d bytes"
            % (fileindex, count, pos))
    return pos

def rilog(msg, pos, fileindex):
    log("read_index: %s at position %d in cache file %d"
        % (msg, pos, fileindex))
|
    try:
        f.truncate()
    except:
        pass
    if count:
        self.log("read_index: cache file %d has %d records and %d bytes"
                 % (fileindex, count, pos))
    return pos

def rilog(self, msg, pos, fileindex):
    self.log("read_index: %s at position %d in cache file %d"
             % (msg, pos, fileindex))

def log(self, msg, level=zLOG.INFO):
    zLOG.LOG("ZEC:%s" % self._storage, level, msg)
|
def read_index(index, serial, f, fileindex):
    seek = f.seek
    read = f.read
    pos = 4
    count = 0
    while 1:
        f.seek(pos)
        h = read(27)
        if len(h) != 27:
            # An empty read is expected, anything else is suspect
            if h:
                rilog("truncated header", pos, fileindex)
            break
        if h[8] in 'vni':
            tlen, vlen, dlen = unpack(">iHi", h[9:19])
        else:
            tlen = -1
        if tlen <= 0 or vlen < 0 or dlen < 0 or vlen + dlen > tlen:
            rilog("invalid header data", pos, fileindex)
            break
        oid = h[:8]
        if h[8] == 'v' and vlen:
            seek(dlen+vlen, 1)
            vdlen = read(4)
            if len(vdlen) != 4:
                rilog("truncated record", pos, fileindex)
                break
            vdlen = unpack(">i", vdlen)[0]
            if vlen+dlen+43+vdlen != tlen:
                rilog("inconsistent lengths", pos, fileindex)
                break
            seek(vdlen, 1)
            vs = read(8)
            if read(4) != h[9:13]:
                rilog("inconsistent tlen", pos, fileindex)
                break
        else:
            if h[8] in 'vn' and vlen == 0:
                if dlen+31 != tlen:
                    rilog("inconsistent nv lengths", pos, fileindex)
                seek(dlen, 1)
                if read(4) != h[9:13]:
                    rilog("inconsistent nv tlen", pos, fileindex)
                    break
            vs = None
        if h[8] in 'vn':
            if fileindex:
                index[oid] = -pos
            else:
                index[oid] = pos
            serial[oid] = h[-8:], vs
        else:
            if serial.has_key(oid):
                # We have a record for this oid, but it was invalidated!
                del serial[oid]
                del index[oid]
        pos = pos + tlen
        count += 1
    f.seek(pos)
    try:
        f.truncate()
    except:
        pass
    if count:
        log("read_index: cache file %d has %d records and %d bytes"
            % (fileindex, count, pos))
    return pos
|
cs = ClientStorage(addr, storage=storage, debug=1, wait=1)
|
cs = ClientStorage(addr, storage=storage, wait=1, read_only=1)
|
def check_server(addr, storage, write):
    if ZEO_VERSION == 2:
        cs = ClientStorage(addr, storage=storage, debug=1, wait=1)
    else:
        cs = ClientStorage(addr, storage=storage, debug=1,
                           wait_for_server_on_startup=1)
    # _startup() is an artifact of the way ZEO 1.0 works. The
    # ClientStorage doesn't get fully initialized until registerDB()
    # is called. The only thing we care about, though, is that
    # registerDB() calls _startup().
    db = ZODB.DB(cs)
    cn = db.open()
    root = cn.root()
    if write:
        try:
            root['zeoup'] = root.get('zeoup', 0)+ 1
            get_transaction().commit()
        except ConflictError:
            pass
    cn.close()
    db.close()
|
def testEmptyFirstBucketReportedByGuido(self):
|
def XXXtestEmptyFirstBucketReportedByGuido(self):
|
def testEmptyFirstBucketReportedByGuido(self):
    b = self.t
    for i in xrange(29972): # reduce to 29971 and it works
        b[i] = i
    for i in xrange(30): # reduce to 29 and it works
        del b[i]
        b[i+40000] = i
|
if not self._oids:
    self._oids = self._server.new_oids()
    self._oids.reverse()
oid = self._oids.pop()
self._oid_lock.release()
return oid
|
try:
    if not self._oids:
        self._oids = self._server.new_oids()
        self._oids.reverse()
    return self._oids.pop()
finally:
    self._oid_lock.release()
|
def new_oid(self, last=None):
    if self._is_read_only:
        raise POSException.ReadOnlyError()
    # avoid multiple oid requests to server at the same time
    self._oid_lock.acquire()
    if not self._oids:
        self._oids = self._server.new_oids()
        self._oids.reverse()
    oid = self._oids.pop()
    self._oid_lock.release()
    return oid
|
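This row's fix wraps the pop in try/finally so `_oid_lock` is released even when `new_oids()` raises. In modern Python the same guarantee is usually written as a `with` block; a minimal sketch with illustrative names:

```python
import threading

class OidAllocator:
    def __init__(self, server):
        self._server = server            # anything with a new_oids() method
        self._oids = []
        self._oid_lock = threading.Lock()

    def new_oid(self):
        # The lock is released on every exit path, including exceptions.
        with self._oid_lock:
            if not self._oids:
                self._oids = list(self._server.new_oids())
                self._oids.reverse()
            return self._oids.pop()
```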
0
|
False
|
def __getattr__(self, name):
    """Get attributes that can't be gotten the usual way
|
try: del self._keys
|
try: del self._v_keys
|
def __setitem__(self, key, v):
|
try: del self._keys
|
try: del self._v_keys
|
def __delitem__(self, key):
|
try: return self._keys
|
try: return self._v_keys
|
def keys(self):
|
keys=self._keys=filter(
|
keys=self._v_keys=filter(
|
def keys(self):
|
if hasattr(self,'_keys'): del self._keys
|
if hasattr(self,'_v_keys'): del self._v_keys
|
def clear(self):
|
another. `other' must have an .iterator() method.
|
another. `other` must have an .iterator() method.
|
def copyTransactionsFrom(self, other, verbose=0):
    """Copy transactions from another storage.
|
opts, args = getopt.getopt(args, 'zd:n:Ds:M')
|
opts, args = getopt.getopt(args, 'zd:n:Ds:L')
|
def main(args):
    opts, args = getopt.getopt(args, 'zd:n:Ds:M')
    z=s=None
    data=sys.argv[0]
    nrep=5
    minimize=0
    for o, v in opts:
        if o=='-n': nrep=string.atoi(v)
        elif o=='-d': data=v
        elif o=='-s': s=v
        elif o=='-z':
            global zlib
            import zlib
            z=compress
        elif o=='-M': minimize=1
        elif o=='-D':
            global debug
            os.environ['STUPID_LOG_FILE']=''
            os.environ['STUPID_LOG_SEVERITY']='-999'
            __builtins__.__debug__=1
    if s:
        s=__import__(s, globals(), globals(), ('__doc__',))
        s=s.Storage
    else:
        s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
    data=open(data).read()
    db=ZODB.DB(s,
               # disable cache deactivation
               cache_size=4000,
               cache_deactivate_after=6000,)
    results={}
    for j in range(nrep):
        for r in 1, 10, 100, 1000:
            t=time.time()
            jar=db.open()
            get_transaction().begin()
            rt=jar.root()
            key='s%s' % r
            if rt.has_key(key): p=rt[key]
            else: rt[key]=p=P()
            for i in range(r):
                if z is not None: d=z(data)
                else: d=data
                v=getattr(p, str(i), P())
                v.d=d
                setattr(p,str(i),v)
            get_transaction().commit()
            jar.close()
            sys.stderr.write("%s %s %s\n" % (j, r, time.time()-t))
            sys.stdout.flush()
            rt=d=p=v=None # release all references
            if minimize:
                time.sleep(3)
                jar.cacheMinimize(3)
|
self._cache.invalidate(object._p_oid)
|
if object._p_oid is not None: self._cache.invalidate(object._p_oid)
|
def abort(self, object, transaction):
    """Abort the object in the transaction.
|
self._invalidated[oid]=1
|
assert oid is not None
self._invalidated[oid] = 1
|
def invalidate(self, oid):
    """Invalidate a particular oid
|
msg = "Shouldn't load state for %s when the connection is closed" % `oid`
LOG('ZODB',ERROR, msg)
|
msg = ("Shouldn't load state for %s "
       "when the connection is closed" % `oid`)
LOG('ZODB', ERROR, msg)
|
def setstate(self, object):
    oid=object._p_oid
|
invalidate=self._db.invalidate
|
def _invalidate_invalidating(self):
    invalidate=self._db.invalidate
    for oid in self._invalidating:
        invalidate(oid, self)
    self._db.finish_invalidation()
|
|
invalidate(oid, self)
|
assert oid is not None
self._db.invalidate(oid, self)
|
def _invalidate_invalidating(self):
    invalidate=self._db.invalidate
    for oid in self._invalidating:
        invalidate(oid, self)
    self._db.finish_invalidation()
|
res = (self._load_count, self._store_count)
|
res = self._load_count, self._store_count
|
def getTransferCounts(self, clear=0):
    """Returns the number of objects loaded and stored.
|
def setCacheDeactivateAfter(self, v):
    self._cache_deactivate_after=v

def setCacheSize(self, v):
    self._cache_size=v
|
def setCacheDeactivateAfter(self, v):
    self._cache_deactivate_after=v
    for c in self._pools[0][''][1]:
        c._cache.cache_age=v

def setCacheSize(self, v):
    self._cache_size=v
    for c in self._pools[0][''][1]:
        c._cache.cache_size=v
|
def setCacheDeactivateAfter(self, v): self._cache_deactivate_after=v
|
def setVersionCacheSize(self, v): self._version_cache_size=v
|
for ver in self._pools[0].keys():
    if ver:
        for c in self._pools[0][ver][1]:
            c._cache.cache_age=v

def setVersionCacheSize(self, v):
    self._version_cache_size=v
    for ver in self._pools[0].keys():
        if v:
            for c in self._pools[0][ver][1]:
                c._cache.cache_size=v
|
def setVersionCacheSize(self, v): self._version_cache_size=v
|
__traceback_info__=oid
|
__traceback_info__ = (oid, p)
|
def __getitem__(self, oid, tt=type(()), ct=type(HelperClass)):
    cache=self._cache
    if cache.has_key(oid):
        return cache[oid]
|
object = unpickler.load()
|
try:
    object = unpickler.load()
except:
    raise "Could not load oid %s, pickled data in traceback info may\
    contain clues" % (oid)
|
def __getitem__(self, oid, tt=type(()), ct=type(HelperClass)):
    cache=self._cache
    if cache.has_key(oid):
        return cache[oid]
|
current=not current
|
current=not self._current
|
def checkSize(self, size):
    # Make sure we aren't going to exceed the target size.
    # If we are, then flip the cache.
    if self._pos+size > self._limit:
        current=not current
        self._current=current
        self._f[current]=open(self._p[current],'w+b')
        self._f[current].write(magic)
        self._pos=pos=4
|
unpickler.load()
state = unpickler.load()
|
try:
    unpickler.load()
    state = unpickler.load()
except:
    t, v = sys.exc_info()[:2]
    raise
|
def setstate(self,object):
    # Note, we no longer mess with the object's state
    # flag, _p_changed. This is the object's job.
    oid=object._p_oid
    invalid=self._invalid
    if invalid(oid) or invalid(None):
        raise ConflictError, oid
    p, serial = self._storage.load(oid, self._version)
    file=StringIO(p)
    unpickler=Unpickler(file)
    unpickler.persistent_load=self._persistent_load
    unpickler.load()
    state = unpickler.load()
    if hasattr(object, '__setstate__'):
        object.__setstate__(state)
    else:
        d=object.__dict__
        for k,v in state.items():
            d[k]=v
    object._p_serial=serial
|
unless = self.failUnless
|
require = self.assert_
|
def checkTimeoutProvokingConflicts(self):
    eq = self.assertEqual
    raises = self.assertRaises
    unless = self.failUnless
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty
    unless(not list(storage._cache.contents()))
    # Create the object
    oid = storage.new_oid()
    obj = MinPO(7)
    # We need to successfully commit an object now so we have something to
    # conflict about.
    t = Transaction()
    storage.tpc_begin(t)
    revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    revid1b = storage.tpc_vote(t)
    revid1 = handle_serials(oid, revid1a, revid1b)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 8
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    # Now sleep long enough for the storage to time out
    time.sleep(3)
    storage.sync()
    unless(not storage.is_connected())
    storage._wait()
    unless(storage.is_connected())
    # We expect finish to fail
    raises(ClientDisconnected, storage.tpc_finish, t)
    # Now we think we've committed the second transaction, but we really
    # haven't.  A third one should produce a POSKeyError on the server,
    # which manifests as a ConflictError on the client.
    obj.value = 9
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    raises(ConflictError, storage.tpc_vote, t)
    # Even aborting won't help
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Try again
    obj.value = 10
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    # Even aborting won't help
    raises(ConflictError, storage.tpc_vote, t)
    # Abort this one and try a transaction that should succeed
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 11
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    storage.tpc_finish(t)
    # Now load the object and verify that it has a value of 11
    data, revid = storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(11))
    eq(revid, revid2)
|
unless(not list(storage._cache.contents()))
|
require(not list(storage._cache.contents()))
|
def checkTimeoutProvokingConflicts(self):
    eq = self.assertEqual
    raises = self.assertRaises
    unless = self.failUnless
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty
    unless(not list(storage._cache.contents()))
    # Create the object
    oid = storage.new_oid()
    obj = MinPO(7)
    # We need to successfully commit an object now so we have something to
    # conflict about.
    t = Transaction()
    storage.tpc_begin(t)
    revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    revid1b = storage.tpc_vote(t)
    revid1 = handle_serials(oid, revid1a, revid1b)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 8
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    # Now sleep long enough for the storage to time out
    time.sleep(3)
    storage.sync()
    unless(not storage.is_connected())
    storage._wait()
    unless(storage.is_connected())
    # We expect finish to fail
    raises(ClientDisconnected, storage.tpc_finish, t)
    # Now we think we've committed the second transaction, but we really
    # haven't.  A third one should produce a POSKeyError on the server,
    # which manifests as a ConflictError on the client.
    obj.value = 9
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    raises(ConflictError, storage.tpc_vote, t)
    # Even aborting won't help
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Try again
    obj.value = 10
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    # Even aborting won't help
    raises(ConflictError, storage.tpc_vote, t)
    # Abort this one and try a transaction that should succeed
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 11
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    storage.tpc_finish(t)
    # Now load the object and verify that it has a value of 11
    data, revid = storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(11))
    eq(revid, revid2)
|
time.sleep(3)
|
deadline = time.time() + 60
while time.time() < deadline:
    if storage.is_connected():
        time.sleep(self.timeout / 1.8)
        storage.sync()
    else:
        break
|
def checkTimeoutProvokingConflicts(self):
    eq = self.assertEqual
    raises = self.assertRaises
    unless = self.failUnless
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty
    unless(not list(storage._cache.contents()))
    # Create the object
    oid = storage.new_oid()
    obj = MinPO(7)
    # We need to successfully commit an object now so we have something to
    # conflict about.
    t = Transaction()
    storage.tpc_begin(t)
    revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    revid1b = storage.tpc_vote(t)
    revid1 = handle_serials(oid, revid1a, revid1b)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 8
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    # Now sleep long enough for the storage to time out
    time.sleep(3)
    storage.sync()
    unless(not storage.is_connected())
    storage._wait()
    unless(storage.is_connected())
    # We expect finish to fail
    raises(ClientDisconnected, storage.tpc_finish, t)
    # Now we think we've committed the second transaction, but we really
    # haven't.  A third one should produce a POSKeyError on the server,
    # which manifests as a ConflictError on the client.
    obj.value = 9
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    raises(ConflictError, storage.tpc_vote, t)
    # Even aborting won't help
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Try again
    obj.value = 10
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    # Even aborting won't help
    raises(ConflictError, storage.tpc_vote, t)
    # Abort this one and try a transaction that should succeed
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 11
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    storage.tpc_finish(t)
    # Now load the object and verify that it has a value of 11
    data, revid = storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(11))
    eq(revid, revid2)
|
unless(not storage.is_connected())
|
require(not storage.is_connected())
|
def checkTimeoutProvokingConflicts(self):
    eq = self.assertEqual
    raises = self.assertRaises
    unless = self.failUnless
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty
    unless(not list(storage._cache.contents()))
    # Create the object
    oid = storage.new_oid()
    obj = MinPO(7)
    # We need to successfully commit an object now so we have something to
    # conflict about.
    t = Transaction()
    storage.tpc_begin(t)
    revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    revid1b = storage.tpc_vote(t)
    revid1 = handle_serials(oid, revid1a, revid1b)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 8
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    # Now sleep long enough for the storage to time out
    time.sleep(3)
    storage.sync()
    unless(not storage.is_connected())
    storage._wait()
    unless(storage.is_connected())
    # We expect finish to fail
    raises(ClientDisconnected, storage.tpc_finish, t)
    # Now we think we've committed the second transaction, but we really
    # haven't.  A third one should produce a POSKeyError on the server,
    # which manifests as a ConflictError on the client.
    obj.value = 9
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    raises(ConflictError, storage.tpc_vote, t)
    # Even aborting won't help
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Try again
    obj.value = 10
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    # Even aborting won't help
    raises(ConflictError, storage.tpc_vote, t)
    # Abort this one and try a transaction that should succeed
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 11
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    storage.tpc_finish(t)
    # Now load the object and verify that it has a value of 11
    data, revid = storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(11))
    eq(revid, revid2)
|
unless(storage.is_connected())
|
require(storage.is_connected())
|
def checkTimeoutProvokingConflicts(self):
    eq = self.assertEqual
    raises = self.assertRaises
    unless = self.failUnless
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty
    unless(not list(storage._cache.contents()))
    # Create the object
    oid = storage.new_oid()
    obj = MinPO(7)
    # We need to successfully commit an object now so we have something to
    # conflict about.
    t = Transaction()
    storage.tpc_begin(t)
    revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    revid1b = storage.tpc_vote(t)
    revid1 = handle_serials(oid, revid1a, revid1b)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 8
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    # Now sleep long enough for the storage to time out
    time.sleep(3)
    storage.sync()
    unless(not storage.is_connected())
    storage._wait()
    unless(storage.is_connected())
    # We expect finish to fail
    raises(ClientDisconnected, storage.tpc_finish, t)
    # Now we think we've committed the second transaction, but we really
    # haven't.  A third one should produce a POSKeyError on the server,
    # which manifests as a ConflictError on the client.
    obj.value = 9
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    raises(ConflictError, storage.tpc_vote, t)
    # Even aborting won't help
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Try again
    obj.value = 10
    t = Transaction()
    storage.tpc_begin(t)
    storage.store(oid, revid2, zodb_pickle(obj), '', t)
    # Even aborting won't help
    raises(ConflictError, storage.tpc_vote, t)
    # Abort this one and try a transaction that should succeed
    storage.tpc_abort(t)
    storage.tpc_finish(t)
    # Now do a store, sleeping before the finish so as to cause a timeout
    obj.value = 11
    t = Transaction()
    storage.tpc_begin(t)
    revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
    revid2b = storage.tpc_vote(t)
    revid2 = handle_serials(oid, revid2a, revid2b)
    storage.tpc_finish(t)
    # Now load the object and verify that it has a value of 11
    data, revid = storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(11))
    eq(revid, revid2)
|
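Among these rows, a fixed `time.sleep(3)` is replaced by a deadline loop, the more robust way to wait for an asynchronous state change under a configurable timeout. The generic pattern, as a sketch (the predicate is a stand-in):

```python
import time

def wait_until(predicate, timeout=60, interval=0.5):
    """Poll predicate() until it returns true or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False
```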
if j is None: continue
i=id(j)
if not jars.has_key(i):
    jars[i]=j
    if subtransaction:
        subj[i]=j
        j.tpc_begin(self, subtransaction)
    else:
        j.tpc_begin(self)
j.commit(o,self)
|
if j is not None:
    i=id(j)
    if not jars.has_key(i):
        jars[i]=j
        if subtransaction:
            subj[i]=j
            j.tpc_begin(self, subtransaction)
        else:
            j.tpc_begin(self)
    j.commit(o,self)
|
def commit(self, subtransaction=None):
    'Finalize the transaction'
|
i=i+22+vlen+dlen
|
i=i+14+vlen+dlen
|
def tpc_finish(self, transaction, f=None):
    self._lock_acquire()
    try:
        if transaction is not self._transaction:
            return
        if f is not None:
            f()
|
l=allocate_lock()
self._a=l.acquire
self._r=l.release
self._pools={},[]
self._temps=[]
self._pool_size=pool_size
self._cache_size=cache_size
self._cache_deactivate_after=cache_deactivate_after
self._version_pool_size=version_pool_size
self._version_cache_size=version_cache_size
self._version_cache_deactivate_after=version_cache_deactivate_after
self._miv_cache={}
|
def __init__(self, storage,
             pool_size=7,
             cache_size=400,
             cache_deactivate_after=60,
             version_pool_size=3,
             version_cache_size=100,
             version_cache_deactivate_after=10,
             ):
    """Create an object database.
|
|
cs = ClientStorage(addr, storage=storage,
|
cs = ClientStorage(addr, storage=storage, debug=1,
|
def check_server(addr, storage):
    cs = ClientStorage(addr, storage=storage,
                       wait_for_server_on_startup=0)
    # _startup() is an artifact of the way ZEO 1.0 works. The
    # ClientStorage doesn't get fully initialized until registerDB()
    # is called. The only thing we care about, though, is that
    # registerDB() calls _startup().
    # XXX Is connecting a DB with wait_for_server_on_startup=0 a
    # sufficient test for upness?
    db = ZODB.DB(cs)
    db.close()
|
cache.checkSize(size)
|
cache.checkSize(0)
|
def load(self, oid, version, _stuff=None):
    self._lock_acquire()
    try:
        cache=self._cache
        p = cache.load(oid, version)
        if p: return p
        p, s, v, pv, sv = self._call('zeoLoad', oid)
        cache.checkSize(size)
        cache.store(oid, p, s, v, pv, sv)
        if not v or not version or version != v:
            if s: return p, s
            raise KeyError, oid # no non-version data for this
        return pv, sv
    finally:
        self._lock_release()
|
print "newargs", repr(newargs)
|
def serialize(self, obj):
    # We don't use __class__ here, because obj could be a persistent proxy.
    # We don't want to be fooled by proxies.
    klass = type(obj)
|
|
cmd = "%s %s" % (sys.executable, ZEO.start.__file__)
if cmd[-1] == "c":
    cmd = cmd[:-1]
|
def removefs(base):
    """Remove all files created by FileStorage with path base."""
    for ext in '', '.old', '.tmp', '.lock', '.index', '.pack':
        path = base + ext
        try:
            os.remove(path)
        except os.error, err:
            if err[0] != errno.ENOENT:
                raise
|
|
self.env = Environment(self.cmd)
|
def setUp(self):
    self.pids = {}
    self.env = Environment(self.cmd)
|
|
buf1 = open(logfile1).read()
|
for i in range(10):
    try:
        buf1 = open(logfile1).read()
    except IOError, e:
        if e.errno != errno.ENOENT: raise
        time.sleep(1)
    else:
        break
|
def testLogRestart(self):
    port = 9090
    logfile1 = tempfile.mktemp(suffix="log")
    logfile2 = tempfile.mktemp(suffix="log")
    os.environ["EVENT_LOG_FILE"] = logfile1
|
t=self.t
t.update([(1,1),(5,5),(9,9)])
self.assertEqual(list(t.keys(-6,-4)),[], list(t.keys(-6,-4)))
self.assertEqual(list(t.keys(2,4)),[], list(t.keys(2,4)))
self.assertEqual(list(t.keys(6,8)),[], list(t.keys(6,8)))
self.assertEqual(list(t.keys(10,12)),[], list(t.keys(10,12)))
|
t = self.t
t.update([(1,1), (5,5), (9,9)])
self.assertEqual(list(t.keys(-6,-4)), [], list(t.keys(-6,-4)))
self.assertEqual(list(t.keys(2,4)), [], list(t.keys(2,4)))
self.assertEqual(list(t.keys(6,8)), [], list(t.keys(6,8)))
self.assertEqual(list(t.keys(10,12)), [], list(t.keys(10,12)))
self.assertEqual(list(t.keys(9, 1)), [], list(t.keys(9, 1)))
|
def testEmptyRangeSearches(self):
    t=self.t
    t.update([(1,1),(5,5),(9,9)])
    self.assertEqual(list(t.keys(-6,-4)),[], list(t.keys(-6,-4)))
    self.assertEqual(list(t.keys(2,4)),[], list(t.keys(2,4)))
    self.assertEqual(list(t.keys(6,8)),[], list(t.keys(6,8)))
    self.assertEqual(list(t.keys(10,12)),[], list(t.keys(10,12)))
|
t=self.t
t.update([1,5,9])
self.assertEqual(list(t.keys(-6,-4)),[], list(t.keys(-6,-4)))
self.assertEqual(list(t.keys(2,4)),[], list(t.keys(2,4)))
self.assertEqual(list(t.keys(6,8)),[], list(t.keys(6,8)))
self.assertEqual(list(t.keys(10,12)),[], list(t.keys(10,12)))
|
t = self.t
t.update([1, 5, 9])
self.assertEqual(list(t.keys(-6,-4)), [], list(t.keys(-6,-4)))
self.assertEqual(list(t.keys(2,4)), [], list(t.keys(2,4)))
self.assertEqual(list(t.keys(6,8)), [], list(t.keys(6,8)))
self.assertEqual(list(t.keys(10,12)), [], list(t.keys(10,12)))
self.assertEqual(list(t.keys(9,1)), [], list(t.keys(9,1)))
|
def testEmptyRangeSearches(self):
    t=self.t
    t.update([1,5,9])
    self.assertEqual(list(t.keys(-6,-4)),[], list(t.keys(-6,-4)))
    self.assertEqual(list(t.keys(2,4)),[], list(t.keys(2,4)))
    self.assertEqual(list(t.keys(6,8)),[], list(t.keys(6,8)))
    self.assertEqual(list(t.keys(10,12)),[], list(t.keys(10,12)))
|
class ConnectionTests(StorageTestBase.StorageTestBase):
|
class ConnectionTests(StorageTestBase):
|
def invalidate(self, *args): pass
|
__super_tearDown = StorageTestBase.StorageTestBase.tearDown
|
__super_setUp = StorageTestBase.setUp
__super_tearDown = StorageTestBase.tearDown
|
def invalidate(self, *args): pass
|
"""Start a ZEO server using a Unix domain socket The ZEO server uses the storage object returned by the getStorage() method.
|
"""Test setup for connection tests. This starts only one server; a test may start more servers by calling self._newAddr() and then self.startServer(index=i) for i in 1, 2, ...
|
def setUp(self):
    """Start a ZEO server using a Unix domain socket
|
                      read_only=0, read_only_fallback=0):
    base = ClientStorage(self.addr, client=cache, cache_size=cache_size,
                         wait=wait, min_disconnect_poll=0.1,
                         read_only=read_only,
                         read_only_fallback=read_only_fallback)
    storage = base
|
                      read_only=0, read_only_fallback=0, addr=None):
    if addr is None:
        addr = self.addr
    storage = ClientStorage(addr, client=cache, cache_size=cache_size,
                            wait=wait, min_disconnect_poll=0.1,
                            read_only=read_only,
                            read_only_fallback=read_only_fallback)
|
def openClientStorage(self, cache='', cache_size=200000, wait=1,
                      read_only=0, read_only_fallback=0):
    base = ClientStorage(self.addr, client=cache, cache_size=cache_size,
                         wait=wait, min_disconnect_poll=0.1,
                         read_only=read_only,
                         read_only_fallback=read_only_fallback)
    storage = base
    storage.registerDB(DummyDB(), None)
    return storage
|
select.error, thread.error, socket.error):
|
select.error, threading.ThreadError, socket.error):
|
def checkReconnectSwitch(self):
    # A fallback client initially connects to a read-only server,
    # then discovers a read-write server and switches to that
|
except (Disconnected, select.error, thread.error, socket.error):
|
except (Disconnected, select.error, threading.ThreadError, socket.error):
|
def checkReconnection(self):
    # Check that the client reconnects when a server restarts.
|
self._reader = ConnectionObjectReader(self, self._cache, self._db._classFactory)
|
def _setDB(self, odb):
    """Begin a new transaction.
|
|
os.environ["EVENT_LOG_FILE"] = logfile1
|
os.environ["STUPID_LOG_FILE"] = logfile1
|
def testLogRestart(self):
    port = 9090
    logfile1 = tempfile.mktemp(suffix="log")
    logfile2 = tempfile.mktemp(suffix="log")
    os.environ["EVENT_LOG_FILE"] = logfile1
|
_CONNECT_OK = (0, errno.WSAEISCONN)
|
_CONNECT_OK = (0, errno.WSAEISCONN, errno.WSAEINVAL)
|
def notify_closed(self):
    self.connected = 0
    self.connection = None
    self.client.notifyDisconnected()
    if not self.closed:
        self.connect()
|
self._db=self._storage=self._tmp=self.new_oid=None
|
self._db=self._storage=self._tmp=self.new_oid=self._opened=None
self._debug_info=()
|
def close(self):
    self._incrgc()
    db=self._db
    self._db=self._storage=self._tmp=self.new_oid=None
    db._closeConnection(self)
|