[Zodb-checkins] CVS: ZODB4/src/zodb/storage - base.py:1.17 bdbfull.py:1.14 bdbminimal.py:1.13 file.py:1.12 fsdump.py:1.4 interfaces.py:1.10 mapping.py:1.6

Barry Warsaw barry@wooz.org
Thu, 13 Mar 2003 16:32:59 -0500


Update of /cvs-repository/ZODB4/src/zodb/storage
In directory cvs.zope.org:/tmp/cvs-serv27419/src/zodb/storage

Modified Files:
	base.py bdbfull.py bdbminimal.py file.py fsdump.py 
	interfaces.py mapping.py 
Log Message:
> I believe we're ready to merge back to the head.

merging the opaque-pickles-branch back into the head



=== ZODB4/src/zodb/storage/base.py 1.16 => 1.17 ===
--- ZODB4/src/zodb/storage/base.py:1.16	Thu Feb 27 18:20:13 2003
+++ ZODB4/src/zodb/storage/base.py	Thu Mar 13 16:32:28 2003
@@ -1,4 +1,3 @@
-
 ##############################################################################
 #
 # Copyright (c) 2001 Zope Corporation and Contributors.
@@ -41,14 +40,13 @@
     berkeley_is_available = False
 
 from zodb.timestamp import newTimeStamp, TimeStamp
-from zodb.interfaces import ITransactionAttrs
+from zodb.interfaces import ITransactionAttrs, ZERO
 from zodb.storage.interfaces import StorageTransactionError, ReadOnlyError
 # BaseStorage provides primitives for lock acquisition and release, and a host
 # of other methods, some of which are overridden here, some of which are not.
 from zodb.lockfile import LockFile
 from zodb.serialize import findrefs
 
-ZERO = '\0'*8
 GBYTES = 1024 * 1024 * 1000
 JOIN_TIME = 10
 
@@ -335,11 +333,25 @@
             for r in transaction:
                 if verbose:
                     print `r.oid`, r.version, len(r.data)
-                self.restore(r.oid, r.serial, r.data, r.version,
+                self.restore(r.oid, r.serial, r.data, r.refs, r.version,
                              r.data_txn, transaction)
             self.tpcVote(transaction)
             self.tpcFinish(transaction)
 
+# A couple of convenience methods
+def splitrefs(refstr, oidlen=8):
+    # refstr is a packed string of reference oids.  Always return a list of
+    # oid strings.  Most storages use fixed oid lengths of 8 bytes, but if
+    # the oids in refstr are a different size, use oidlen to specify.  This
+    # does /not/ support variable length oids in refstr.
+    if not refstr:
+        return []
+    num, extra = divmod(len(refstr), oidlen)
+    fmt = '%ds' % oidlen
+    assert extra == 0, refstr
+    return list(struct.unpack('>' + (fmt * num), refstr))
+
+
 
 class BerkeleyConfig:
     """Bag of attributes for configuring Berkeley based storages.
@@ -497,6 +509,7 @@
         self._serials = self._setupDB('serials', db.DB_DUP)
         self._pickles = self._setupDB('pickles')
         self._refcounts = self._setupDB('refcounts')
+        self._references = self._setupDB('references')
         self._oids = self._setupDB('oids')
         self._pending = self._setupDB('pending')
         self._packmark = self._setupDB('packmark')
@@ -695,8 +708,8 @@
         self._env.close()
 
     # A couple of convenience methods
-    def _update(self, deltas, data, incdec):
-        for oid in findrefs(data):
+    def _update(self, deltas, references, incdec):
+        for oid in splitrefs(references):
             rc = deltas.get(oid, 0) + incdec
             if rc == 0:
                 # Save space in the dict by zapping zeroes


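A quick sketch of how the new splitrefs() helper behaves (illustrative
only, not part of the checkin; the oid values below are made up):

    from struct import pack
    from zodb.storage.base import splitrefs

    oid1 = pack('>Q', 1)             # an 8-byte oid string
    oid2 = pack('>Q', 42)
    refstr = oid1 + oid2             # the packed form kept in 'references'
    assert splitrefs(refstr) == [oid1, oid2]
    assert splitrefs('') == []       # no references -> empty list
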
=== ZODB4/src/zodb/storage/bdbfull.py 1.13 => 1.14 === (487/587 lines abridged)
--- ZODB4/src/zodb/storage/bdbfull.py:1.13	Tue Feb 11 10:59:27 2003
+++ ZODB4/src/zodb/storage/bdbfull.py	Thu Mar 13 16:32:28 2003
@@ -14,7 +14,7 @@
 
 """Berkeley storage with full undo and versioning support.
 
-$Revision$
+$Id$
 """
 
 import time
@@ -24,35 +24,28 @@
 from zodb.interfaces import *
 from zodb.storage.interfaces import *
 from zodb.utils import p64, u64
-from zodb.serialize import findrefs
 from zodb.timestamp import TimeStamp
 from zodb.conflict import ConflictResolvingStorage, ResolvedSerial
 from zodb.interfaces import ITransactionAttrs
 from zodb.storage.interfaces import StorageSystemError
-from zodb.storage.base import db, ZERO, BerkeleyBase, PackStop, _WorkThread
+from zodb.storage.base import db, BerkeleyBase, PackStop, _WorkThread, \
+     splitrefs
 from zodb.storage._helper import incr
 
 ABORT = 'A'
 COMMIT = 'C'
 PRESENT = 'X'
 
-BDBFULL_SCHEMA_VERSION = 'BF01'
+BDBFULL_SCHEMA_VERSION = 'BF02'
 
+EMPTYSTRING = ''
 # Special flag for uncreated objects (i.e. Does Not Exist)
-DNE = '\377'*8
+DNE = MAXTID
 # DEBUGGING
 #DNE = 'nonexist'
 
 
 
-def DB(name, config):
-    """Create a new object database using BDBFullStorage."""
-    import zodb.db
-    storage = BDBFullStorage(name, config=config)
-    return zodb.db.DB(storage)
-
-
-
 class BDBFullStorage(BerkeleyBase, ConflictResolvingStorage):

[-=- -=- -=- 487 lines omitted -=- -=- -=-]

+                self._cursor = self._table.cursor()
+                try:
+                    self._rec = self._cursor.set(self.tid)
+                except db.DBNotFoundError:
+                    pass
+            # Cursor exhausted?
+            if self._rec is None:
+                self.close()
+                raise IndexError
+            oid = self._rec[1]
+            self._rec = self._cursor.next_dup()
+            data, version, lrevid, refs = self._storage._loadSerialEx(
+                oid, self.tid)
+            return _Record(oid, self.tid, version, data, lrevid, refs)
+        except:
+            self.close()
+            raise
+
+    def close(self):
+        if self._cursor:
+            self._cursor.close()
+            self._cursor = None
 
 
 
 class _Record:
+
+    __implements__ = IDataRecord
+
     # Object Id
     oid = None
     # Object serial number (i.e. revision id)
@@ -1794,13 +1854,16 @@
     data = None
     # The pointer to the transaction containing the pickle data, if not None
     data_txn = None
+    # The list of oids of objects referred to by this object
+    refs = []
 
-    def __init__(self, oid, serial, version, data, data_txn):
+    def __init__(self, oid, serial, version, data, data_txn, refs):
         self.oid = oid
         self.serial = serial
         self.version = version
         self.data = data
         self.data_txn = data_txn
+        self.refs = refs
 
 
 


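With refs now carried on every data record (see _Record above and
IDataRecord in interfaces.py below), a copy loop no longer needs to
re-parse pickles with findrefs().  A minimal sketch, assuming records is
an iterable of data records and dest/txn are a destination storage and
an open transaction:

    def copy_records(records, dest, txn):
        # Pass each record's reference list straight through to restore(),
        # mirroring copyTransactionsFrom() in base.py above.
        for r in records:
            dest.restore(r.oid, r.serial, r.data, r.refs, r.version,
                         r.data_txn, txn)
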
=== ZODB4/src/zodb/storage/bdbminimal.py 1.12 => 1.13 ===
--- ZODB4/src/zodb/storage/bdbminimal.py:1.12	Tue Feb 11 10:59:27 2003
+++ ZODB4/src/zodb/storage/bdbminimal.py	Thu Mar 13 16:32:28 2003
@@ -14,28 +14,22 @@
 
 """Berkeley storage without undo or versioning.
 
-$Revision$
+$Id$
 """
 
+from zodb.interfaces import ZERO
 from zodb.storage.interfaces import *
 from zodb.utils import p64, u64
-from zodb.serialize import findrefs
 from zodb.conflict import ConflictResolvingStorage, ResolvedSerial
-from zodb.storage.base import db, ZERO, BerkeleyBase, PackStop, _WorkThread
+from zodb.storage.base import db, BerkeleyBase, PackStop, _WorkThread
+from zodb.storage.base import splitrefs
 
 ABORT = 'A'
 COMMIT = 'C'
 PRESENT = 'X'
+EMPTYSTRING = ''
 
-BDBMINIMAL_SCHEMA_VERSION = 'BM01'
-
-
-
-def DB(name, config):
-    """Create a new object database using BDBMinimalStorage."""
-    import zodb.db
-    storage = BDBMinimalStorage(name, config=config)
-    return zodb.db.DB(storage)
+BDBMINIMAL_SCHEMA_VERSION = 'BM02'
 
 
 
@@ -78,6 +72,11 @@
         #     reference count is updated during the _finish() call.  When it
         #     goes to zero, the object is automatically deleted.
         #
+        # references -- {oid+tid -> oid+oid+...}
+        #     For each revision of the object, these are the oids of the
+        #     objects referred to in the data record, as a list of 8-byte
+        #     oids, concatenated together.
+        #
         # oids -- [oid]
         #     This is a list of oids of objects that are modified in the
         #     current uncommitted transaction.
@@ -149,8 +148,11 @@
                     pass
                 else:
                     cs.delete()
-                # And delete the pickle table entry for this revision.
-                self._pickles.delete(oid+tid, txn=txn)
+                # Clean up revision-indexed tables
+                revid = oid+tid
+                self._pickles.delete(revid, txn=txn)
+                if self._references.has_key(revid):
+                    self._references.delete(revid, txn=txn)
         finally:
             # There's a small window of opportunity for leaking a cursor here,
             # if co.close() were to fail.  In practice this shouldn't happen.
@@ -185,19 +187,22 @@
                     if soid <> oid:
                         break
                     if stid <> tid:
+                        revid = oid+stid
                         # This is the previous revision of the object, so
-                        # decref its referents and clean up its pickles.
+                        # decref its references and clean up its pickles.
                         cs.delete()
-                        data = self._pickles.get(oid+stid, txn=txn)
-                        assert data is not None
-                        self._update(deltas, data, -1)
-                        self._pickles.delete(oid+stid, txn=txn)
+                        references = self._references.get(revid, txn=txn)
+                        if references:
+                            self._update(deltas, references, -1)
+                        self._pickles.delete(revid, txn=txn)
+                        if self._references.has_key(revid):
+                            self._references.delete(revid, txn=txn)
                     srec = cs.next_dup()
                 # Now add incref deltas for all objects referenced by the new
                 # revision of this object.
-                data = self._pickles.get(oid+tid, txn=txn)
-                assert data is not None
-                self._update(deltas, data, 1)
+                references = self._references.get(oid+tid, txn=txn)
+                if references:
+                    self._update(deltas, references, 1)
         finally:
             # There's a small window of opportunity for leaking a cursor here,
             # if co.close() were to fail.  In practice this shouldn't happen.
@@ -231,8 +236,9 @@
                 # pickles and refcounts table.  Note that before we remove its
                 # pickle, we need to decref all the objects referenced by it.
                 current = self._getCurrentSerial(oid)
-                data = self._pickles.get(oid+current, txn=txn)
-                self._update(newdeltas, data, -1)
+                references = self._references.get(oid+current, txn=txn)
+                if references:
+                    self._update(newdeltas, references, -1)
                 # And delete the serials, pickle and refcount entries.  At
                 # this point, I believe we should have just one serial entry.
                 self._serials.delete(oid, txn=txn)
@@ -258,7 +264,7 @@
         else:
             txn.commit()
 
-    def _dostore(self, txn, oid, serial, data):
+    def _dostore(self, txn, oid, serial, data, refs):
         conflictresolved = False
         oserial = self._getCurrentSerial(oid)
         if oserial is not None and serial <> oserial:
@@ -267,11 +273,15 @@
             # number.  Raise a ConflictError.
             data = self.resolveConflict(oid, oserial, serial, data)
             conflictresolved = True
-        # Optimistically write to the serials and pickles table.  Be sure
-        # to also update the oids table for this object too.
+        # Optimistically write to the various tables.
         newserial = self._serial
+        revid = oid+newserial
         self._serials.put(oid, newserial, txn=txn)
-        self._pickles.put(oid+newserial, data, txn=txn)
+        self._pickles.put(revid, data, txn=txn)
+        if refs:
+            references = EMPTYSTRING.join(refs)
+            assert len(references) % 8 == 0
+            self._references.put(revid, references, txn=txn)
         self._oids.put(oid, PRESENT, txn=txn)
         # If we're in the middle of a pack, we need to add these objects to
         # the packmark, so a specific race condition won't collect them.
@@ -284,7 +294,7 @@
             return ResolvedSerial
         return newserial
 
-    def store(self, oid, serial, data, version, transaction):
+    def store(self, oid, serial, data, refs, version, transaction):
         if transaction is not self._transaction:
             raise StorageTransactionError(self, transaction)
         # We don't support versions
@@ -293,7 +303,7 @@
         # All updates must be done with the application lock acquired
         self._lock_acquire()
         try:
-            return self._withtxn(self._dostore, oid, serial, data)
+            return self._withtxn(self._dostore, oid, serial, data, refs)
         finally:
             self._lock_release()
 
@@ -434,9 +444,12 @@
                 # unit tests), and we're looking up oid ZERO.  Then serial
                 # will be None.
                 if tid is not None:
-                    data = self._pickles[oid+tid]
-                    for oid in findrefs(data):
-                        self._oidqueue.append(oid, txn)
+                    # Now get the oids of all the objects referenced by this
+                    # object revision
+                    references = self._references.get(oid+tid)
+                    if references:
+                        for oid in splitrefs(references):
+                            self._oidqueue.append(oid, txn)
             # Pop the next oid off the queue and do it all again
             rec = self._oidqueue.consume(txn)
             oid = rec and rec[1]
@@ -487,7 +500,7 @@
                     pass
             finally:
                 c.close()
-            # Now collect the pickle data and do reference counting
+            # Collect the pickle data
             c = self._pickles.cursor(txn)
             try:
                 try:
@@ -497,17 +510,33 @@
                 while rec and rec[0][:8] == oid:
                     if self._stop:
                         raise PackStop, 'stopped in _collect_objs() loop 2'
-                    data = rec[1]
                     c.delete()
                     rec = c.next()
-                    deltas = {}
-                    self._update(deltas, data, -1)
-                    for oid, delta in deltas.items():
-                        refcount = u64(self._refcounts.get(oid, ZERO)) + delta
-                        if refcount <= 0:
-                            self._oidqueue.append(oid, txn)
-                        else:
-                            self._refcounts.put(oid, p64(refcount), txn=txn)
+            finally:
+                c.close()
+            # Collect references and do reference counting
+            c = self._references.cursor(txn)
+            try:
+                try:
+                    rec = c.set_range(oid)
+                except db.DBNotFoundError:
+                    rec = None
+                while rec and rec[0][:8] == oid:
+                    if self._stop:
+                        raise PackStop, 'stopped in _collect_objs() loop 3'
+                    references = rec[1]
+                    if references:
+                        deltas = {}
+                        self._update(deltas, references, -1)
+                        for oid, delta in deltas.items():
+                            rc = u64(self._refcounts.get(oid, ZERO)) + delta
+                            if rc <= 0:
+                                self._oidqueue.append(oid, txn)
+                            else:
+                                self._refcounts.put(oid, p64(rc), txn=txn)
+                        # Delete table entry
+                        c.delete()
+                        rec = c.next()
             finally:
                 c.close()
             # We really do want this down here, since _decrefPickle() could


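The reference-count bookkeeping above can be pictured without any
Berkeley tables.  A small self-contained model of what _update() does
with a packed references string (the oids are made up):

    from zodb.storage.base import splitrefs

    def update(deltas, references, incdec):
        # Same arithmetic as BerkeleyBase._update() in base.py above.
        for oid in splitrefs(references):
            rc = deltas.get(oid, 0) + incdec
            if rc == 0:
                del deltas[oid]          # entries that cancel out go away
            else:
                deltas[oid] = rc

    a, b, c = 'A' * 8, 'B' * 8, 'C' * 8
    deltas = {}
    update(deltas, a + b, -1)            # decref the old revision's refs
    update(deltas, a + c, +1)            # incref the new revision's refs
    assert deltas == {b: -1, c: 1}       # 'a' appears in both and cancels
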
=== ZODB4/src/zodb/storage/file.py 1.11 => 1.12 === (1431/1531 lines abridged)
--- ZODB4/src/zodb/storage/file.py:1.11	Thu Feb 27 18:19:20 2003
+++ ZODB4/src/zodb/storage/file.py	Thu Mar 13 16:32:28 2003
@@ -19,7 +19,7 @@
 
   In this section, the first two bytes are the characters F and S.
 
-  The next two bytes are a storage format version id, currently "01".
+  The next two bytes are a storage format version id, currently "42".
 
   The next section is a four-byte database version string, encoded as
   byte 0: major version number
@@ -67,6 +67,8 @@
 
   - 2-byte version length
 
+  - 4-byte number of object references (oids)
+
   - 8-byte data length
 
   ? 8-byte position of non-version data
@@ -75,11 +77,11 @@
   ? 8-byte position of previous record in this version
     (if version length > 0)
 
-  ?   version string
-    (if version length > 0)
+  ? version string (if version length > 0)
 
-  ?   data
-    (data length > 0)
+  ? reference oids (length == # of oids * 8)
+
+  ? data (if data length > 0)
 
   ? 8-byte position of data record containing data
     (data length == 0)
@@ -146,26 +148,25 @@
     fsync = None
 
 import zodb.db
-from zodb.storage.base import BaseStorage
+from zodb.storage.base import BaseStorage, splitrefs
 from zodb import conflict
 from zodb import interfaces
-from zodb.interfaces import UndoError, POSKeyError, MultipleUndoErrors
-from zodb.serialize import findrefs
+from zodb.interfaces import _fmt_oid
+from zodb.interfaces import *
 from zodb.timestamp import TimeStamp, newTimeStamp, timeStampFromTime
 from zodb.lockfile import LockFile

[-=- -=- -=- 1431 lines omitted -=- -=- -=-]

+    def __init__(self, oid, serial, version, data, data_txn, refs):
+        self.oid = oid
+        self.serial = serial
+        self.version = version
+        self.data = data
+        self.data_txn = data_txn
+        self.refs = refs
 
 class UndoSearch:
 
@@ -2261,19 +2111,22 @@
 class DataHeader:
     """Header for a data record."""
 
-    __slots__ = ("oid", "serial", "prev", "tloc", "vlen", "plen", "back",
-                 # These three attributes are only defined when vlen > 0
-                 "pnv", "vprev", "version")
+    __slots__ = (
+        "oid", "serial", "prev", "tloc", "vlen", "plen", "nrefs", "back",
+        # These three attributes are only defined when vlen > 0
+        "pnv", "vprev", "version")
 
     version = ""
     back = 0
 
-    def __init__(self, oid, serial, prev, tloc, vlen, plen):
+    def __init__(self, oid, serial, prev, tloc, vlen, nrefs, plen):
         self.oid = oid
         self.serial = serial
         self.prev = prev
         self.tloc = tloc
+
         self.vlen = vlen
+        self.nrefs = nrefs
         self.plen = plen
 
     def fromString(cls, s):
@@ -2284,6 +2137,12 @@
     def parseVersion(self, buf):
         self.pnv, self.vprev = struct.unpack(">QQ", buf[:16])
         self.version = buf[16:]
+
+    def recordlen(self):
+        rlen = DATA_HDR_LEN + (self.nrefs * 8) + (self.plen or 8)
+        if self.version:
+            rlen += 16 + self.vlen
+        return rlen
 
 
 def cleanup(filename):


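The new nrefs field changes the on-disk size of a data record.  Here is
a rough model of DataHeader.recordlen() above; the 46-byte fixed-header
size is an assumption derived from the fields fsdump.py unpacks below
(oid 8 + revid 8 + prev 8 + tloc 8 + vlen 2 + nrefs 4 + dlen 8):

    DATA_HDR_LEN = 46   # assumed fixed-header size, see note above

    def recordlen(vlen, nrefs, plen):
        # Fixed header, then the reference oids, then the pickle (or an
        # 8-byte backpointer when plen == 0), plus version bookkeeping.
        rlen = DATA_HDR_LEN + nrefs * 8 + (plen or 8)
        if vlen:
            rlen += 16 + vlen            # pnv + vprev pointers + version name
        return rlen

    assert recordlen(vlen=0, nrefs=3, plen=120) == 46 + 24 + 120
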
=== ZODB4/src/zodb/storage/fsdump.py 1.3 => 1.4 ===
--- ZODB4/src/zodb/storage/fsdump.py:1.3	Fri Jan 24 18:20:52 2003
+++ ZODB4/src/zodb/storage/fsdump.py	Thu Mar 13 16:32:28 2003
@@ -17,6 +17,8 @@
 from zodb.storage.file \
      import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
 from zodb.utils import u64
+from zodb.storage.base import splitrefs
+from zodb.storage.tests.base import zodb_unpickle
 
 def fmt(p64):
     # Return a nicely formatted string for a packaged 64-bit value
@@ -74,7 +76,7 @@
         pos = self.file.tell()
         h = self.file.read(DATA_HDR_LEN)
         assert len(h) == DATA_HDR_LEN
-        oid, revid, prev, tloc, vlen, dlen = struct.unpack(DATA_HDR, h)
+        oid, revid, prev, tloc, vlen, nrefs, dlen = struct.unpack(DATA_HDR, h)
         print >> self.dest, "-" * 60
         print >> self.dest, "offset: %d" % pos
         print >> self.dest, "oid: %s" % fmt(oid)
@@ -89,8 +91,15 @@
             print >> self.dest, "non-version data offset: %d" % u64(pnv)
             print >> self.dest, \
                   "previous version data offset: %d" % u64(sprevdata)
+        print >> self.dest, 'numrefs:', nrefs
+        for ref in splitrefs(self.file.read(nrefs * 8)):
+            print >> self.dest, '\t%s' % fmt(ref)
+        # XXX print out the oids?
         print >> self.dest, "len(data): %d" % dlen
-        self.file.read(dlen)
+        data = self.file.read(dlen)
+        # A debugging feature for use with the test suite.
+        if data.startswith("(czodb.storage.tests.minpo\nMinPO\n"):
+            print >> self.dest, "value: %r" % zodb_unpickle(data).value
         if not dlen:
             sbp = self.file.read(8)
             print >> self.dest, "backpointer: %d" % u64(sbp)


=== ZODB4/src/zodb/storage/interfaces.py 1.9 => 1.10 ===
--- ZODB4/src/zodb/storage/interfaces.py:1.9	Thu Mar  6 15:33:52 2003
+++ ZODB4/src/zodb/storage/interfaces.py	Thu Mar 13 16:32:28 2003
@@ -57,7 +57,7 @@
     data.  A load() method can not run at the same time as tpcFinish()
     if it would be possible to read inconsistent data.  XXX Need to
     flesh out the details here.
-    
+
     """
 
     def close():
@@ -109,13 +109,15 @@
     def getSerial(oid):
         """Return the current serial number for oid."""
 
-    def store(oid, serial, data, version, txn):
+    def store(oid, serial, data, refs, version, txn):
         """Store an object and returns a new serial number.
 
         Arguments:
         oid -- the object id, a string
         serial -- the serial number of the revision read by txn, a string
-        data -- the data record, a string
+        data -- the data record (the object's pickle), a string; its
+                references are passed separately via refs
+        refs -- the list of object ids of objects referenced by the data
         version -- the version, a string, typically the empty string
         txn -- the current transaction
 
@@ -135,11 +137,11 @@
         protocol that complicates the return value.  Maybe we can fix that.
         """
 
-    def restore(oid, serial, data, version, prev_txn, txn):
+    def restore(oid, serial, data, refs, version, prev_txn, txn):
         """Store an object with performing consistency checks.
 
         The arguments are the same as store() except for prev_txn.
-        If prev_txn is not None, then prev_txn is the
+        If prev_txn is not None, then prev_txn is the XXX ...?
         """
         pass
 
@@ -182,13 +184,13 @@
         pass
 
 class IUndoStorage(Interface):
-    
+
     def loadSerial(oid, serial):
         """Return data record for revision `serial` of `oid.`
 
         Raises POSKeyError if the revision is not available.
         """
-    
+
     def undo(txnid, txn):
         pass
 
@@ -215,8 +217,8 @@
     def versionEmpty(version):
         pass
 
-    def versions(max=None): 
-       pass
+    def versions():
+        pass
 
 class IStorageIterator(Interface):
 
@@ -244,7 +246,7 @@
 
         Raises IndexError if there are no more.
         """
-    
+
 class IDataRecord(Interface):
 
     oid = Attribute("oid", "object id")
@@ -259,6 +261,8 @@
                          wrote the data.  The current transaction contains
                          a logical copy of that data.
                          """)
+    refs = Attribute("refs",
+                     """list of object ids referred to by this object""")
 
 class StorageError(POSError):
     """Base class for storage based exceptions."""
@@ -290,4 +294,3 @@
 
 class TransactionTooLargeError(StorageTransactionError):
     """The transaction exhausted some finite storage resource."""
-


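To make the widened store() signature concrete, here is a toy in-memory
storage in the spirit of MappingStorage below.  It is an illustration
only: no conflict detection, no versions, no two-phase commit.

    class ToyStorage:
        def __init__(self):
            self._index = {}             # oid -> (serial, data, refs)
            self._serial = '\x00' * 7 + '\x01'

        def store(self, oid, serial, data, refs, version, txn):
            # A real storage compares serial against the current revision
            # and raises ConflictError on a mismatch; elided here.
            self._index[oid] = (self._serial, data, refs)
            return self._serial

        def load(self, oid, version):
            serial, data, refs = self._index[oid]
            return data, serial          # same shape as MappingStorage.load

    s = ToyStorage()
    root, child = '\x00' * 8, '\x00' * 7 + '\x02'
    s.store(root, '\x00' * 8, 'a pickle', [child], '', txn=None)
    assert s.load(root, '')[0] == 'a pickle'
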
=== ZODB4/src/zodb/storage/mapping.py 1.5 => 1.6 ===
--- ZODB4/src/zodb/storage/mapping.py:1.5	Tue Feb 25 13:55:03 2003
+++ ZODB4/src/zodb/storage/mapping.py	Thu Mar 13 16:32:28 2003
@@ -20,6 +20,7 @@
 
 The Mapping storage uses a single data structure to map object ids to
 data.
+
 $Id$
 """
 
@@ -27,9 +28,7 @@
 from zodb import interfaces, utils
 from zodb.storage.base import BaseStorage
 from zodb.storage.interfaces import *
-from zodb.serialize import findrefs
 from zodb.timestamp import TimeStamp
-from zodb.utils import z64
 
 class MappingStorage(BaseStorage):
 
@@ -50,12 +49,12 @@
     def load(self, oid, version):
         self._lock_acquire()
         try:
-            p = self._index[oid]
-            return p[8:], p[:8] # pickle, serial
+            serial, data, refs = self._index[oid]
+            return data, serial
         finally:
             self._lock_release()
 
-    def store(self, oid, serial, data, version, transaction):
+    def store(self, oid, serial, data, refs, version, transaction):
         if transaction is not self._transaction:
             raise StorageTransactionError(self, transaction)
 
@@ -65,22 +64,21 @@
         self._lock_acquire()
         try:
             if self._index.has_key(oid):
-                old = self._index[oid]
-                oserial = old[:8]
+                oserial, odata, orefs = self._index[oid]
                 if serial != oserial:
                     raise interfaces.ConflictError(serials=(oserial, serial))
-
-            self._tindex.append((oid, self._serial + data))
-        finally: self._lock_release()
-
-        return self._serial
+            serial = self._serial
+            self._tindex.append((oid, serial, data, refs))
+        finally:
+            self._lock_release()
+        return serial
 
     def _clear_temp(self):
         self._tindex = []
 
     def _finish(self, tid, user, desc, ext):
-        for oid, p in self._tindex:
-            self._index[oid] = p
+        for oid, serial, data, refs in self._tindex:
+            self._index[oid] = serial, data, refs
         self._ltid = self._serial
 
     def lastTransaction(self):
@@ -89,37 +87,33 @@
     def pack(self, t):
         self._lock_acquire()
         try:
-            # Build an index of *only* those objects reachable from the root.
-            rootl = ["\0\0\0\0\0\0\0\0"]
-            pindex = {}
+            # Build an index of those objects reachable from the root.
+            rootl = [ZERO]
+            packmark = {}
             while rootl:
                 oid = rootl.pop()
-                if pindex.has_key(oid):
+                if packmark.has_key(oid):
                     continue
-                # Scan non-version pickle for references
-                r = self._index[oid]
-                pindex[oid] = r
-                p = r[8:]
-                rootl.extend(findrefs(p))
-
+                # Register this oid and append the objects referenced by this
+                # object to the root search list.
+                rec = self._index[oid]
+                packmark[oid] = rec
+                rootl.extend(rec[2])
             # Now delete any unreferenced entries:
-            for oid in self._index.keys():
-                if not pindex.has_key(oid):
-                    del self._index[oid]
-
+            for oid in self._index.keys():
+                if not packmark.has_key(oid):
+                    del self._index[oid]
         finally:
             self._lock_release()
 
     def _splat(self):
         """Spit out a string showing state."""
-        o=[]
-        o.append("Index:")
-        index=self._index
-        keys=index.keys()
+        o = []
+        o.append('Index:')
+        keys = self._index.keys()
         keys.sort()
         for oid in keys:
-            r=index[oid]
-            o.append("  %s: %s, %s" %
-                     (utils.u64(oid),TimeStamp(r[:8]),`r[8:]`))
+            r = self._index[oid]
+            o.append('  %s: %s, %s' %
+                     (utils.u64(oid), TimeStamp(r[0]), `r[1]`))
-
         return "\n".join(o)
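
Finally, the mark phase that pack() now runs over the stored refs lists,
modeled in plain Python (oids, serials and pickles are made up):

    ZERO = '\x00' * 8
    A, B, C = 'A' * 8, 'B' * 8, 'C' * 8
    index = {
        ZERO: ('T' * 8, 'root pickle', [A]),
        A:    ('T' * 8, 'A pickle',    [B]),
        B:    ('T' * 8, 'B pickle',    []),
        C:    ('T' * 8, 'C pickle',    []),   # unreachable from the root
    }
    rootl, packmark = [ZERO], {}
    while rootl:
        oid = rootl.pop()
        if oid in packmark:
            continue
        packmark[oid] = index[oid]
        rootl.extend(index[oid][2])           # follow this object's refs
    for oid in list(index.keys()):
        if oid not in packmark:
            del index[oid]
    assert C not in index and B in index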