[Zope3-checkins] CVS: ZODB4/src/zodb/storage - bdbfull.py:1.12.4.1

Barry Warsaw <barry@wooz.org>
Mon, 10 Feb 2003 18:05:45 -0500


Update of /cvs-repository/ZODB4/src/zodb/storage
In directory cvs.zope.org:/tmp/cvs-serv3032/src/zodb/storage

Modified Files:
      Tag: opaque-pickles-branch
	bdbfull.py 
Log Message:
The start of opaque pickles (from the p.o.v. of the storages).  This
will eventually allow us to pass compressed pickles to the storage if
we want.

The approach changes store() so that the data argument is a 2-tuple of
the pickle and the list of oids referenced in the pickle.  This is the
first step in the changes; currently only the Berkeley storages
natively store the refs included in the store() API call.
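
A rough sketch of the new calling convention, for illustration only
(store() and findrefs() are the real names from the tree; the wrapper
below and its arguments are made up):

    from zodb.serialize import findrefs

    def store_object(storage, oid, serial, pickle, version, txn):
        # The storage no longer sniffs the pickle for references; the
        # caller passes them explicitly as a (pickle, refs) 2-tuple.
        refs = findrefs(pickle)        # 8-byte oid strings
        storage.store(oid, serial, (pickle, refs), version, txn)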

Changes here include:

- We don't need findrefs() here since store() will be handed the list
  of oid references.

- Bump the schema version to BF02 to reflect the addition of the
  referents table.

- _doabort(), _docommit(): Clean up and use the referents table to
  properly adjust the object refcounts.

- _dostore(): Split the data 2-tuple arg into the data and refs, and
  update the referents table, but only if there actually are oids in
  that list (see the encoding sketch after this list).

- _collect_revs(), _decrefPickle(), _mark(): Get rid of pickle sniffing
  for object references.  Use the stored information in the referents
  table instead.

- Get ZERO from zodb.interfaces, and use MAXTID for DNE.
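
For reference, a small sketch of the referents encoding mentioned
above; join_oids() and split_oids() are illustrative stand-ins for the
inline join in _dostore() and the _splitoids() helper used by _mark():

    def join_oids(refs):
        # Same idea as EMPTYSTRING.join(refs) in _dostore()
        referents = ''.join(refs)
        assert len(referents) % 8 == 0
        return referents

    def split_oids(referents):
        # Undo the join: carve the value back into 8-byte oids
        return [referents[i:i+8] for i in range(0, len(referents), 8)]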


=== ZODB4/src/zodb/storage/bdbfull.py 1.12 => 1.12.4.1 ===
--- ZODB4/src/zodb/storage/bdbfull.py:1.12	Wed Feb  5 18:28:32 2003
+++ ZODB4/src/zodb/storage/bdbfull.py	Mon Feb 10 18:05:44 2003
@@ -24,22 +24,22 @@
 from zodb.interfaces import *
 from zodb.storage.interfaces import *
 from zodb.utils import p64, u64
-from zodb.serialize import findrefs
 from zodb.timestamp import TimeStamp
 from zodb.conflict import ConflictResolvingStorage, ResolvedSerial
 from zodb.interfaces import ITransactionAttrs
 from zodb.storage.interfaces import StorageSystemError
-from zodb.storage.base import db, ZERO, BerkeleyBase, PackStop, _WorkThread
+from zodb.storage.base import db, BerkeleyBase, PackStop, _WorkThread
 from zodb.storage._helper import incr
 
 ABORT = 'A'
 COMMIT = 'C'
 PRESENT = 'X'
 
-BDBFULL_SCHEMA_VERSION = 'BF01'
+BDBFULL_SCHEMA_VERSION = 'BF02'
 
+EMPTYSTRING = ''
 # Special flag for uncreated objects (i.e. Does Not Exist)
-DNE = '\377'*8
+DNE = MAXTID
 # DEBUGGING
 #DNE = 'nonexist'
 
@@ -107,6 +107,11 @@
         #     object, so it is never decremented except at pack time.  When it
         #     goes to zero, the object is automatically deleted.
         #
+        # referents -- {oid+tid -> oid+oid+...}
+        #     For each revision of the object, these are the oids of the
+        #     objects referred to in the data record, as a list of 8-byte
+        #     oids, concatenated together.
+        #
         # oids -- [oid]
         #     This is a list of oids of objects that are modified in the
         #     current uncommitted transaction.
@@ -330,6 +335,8 @@
                 vid = self._metadata[revid][:8]
                 self._metadata.delete(revid, txn=txn)
                 self._pickles.delete(revid, txn=txn)
+                if self._referents.has_key(revid):
+                    self._referents.delete(revid, txn=txn)
                 # Clean up the object revisions table
                 try:
                     cr.set(oid+tid)
@@ -406,8 +413,9 @@
                 # for the George Bailey Event, which has no pickle.
                 if lrevid <> DNE:
                     revid = oid + lrevid
-                    data = self._pickles[revid]
-                    self._update(deltas, data, 1)
+                    referents = self._referents.get(revid, txn=txn)
+                    if referents:
+                        self._update(deltas, referents, 1)
                     # Incref this pickle; there's a new revision pointing to it
                     refcount = self._pickleRefcounts.get(revid, ZERO, txn=txn)
                     self._pickleRefcounts.put(revid, incr(refcount, 1),
@@ -433,7 +441,7 @@
             # if co.close() were to fail.  In practice this shouldn't happen.
             if co: co.close()
             if cs: cs.close()
-        # Now incref all the object refcounts
+        # Now incref all referents
         for oid, delta in deltas.items():
             refcount = self._refcounts.get(oid, ZERO, txn=txn)
             self._refcounts.put(oid, incr(refcount, delta), txn=txn)
@@ -461,7 +469,7 @@
         # transaction were to abort, we'd clean this up anyway.
         userlen = len(u)
         desclen = len(d)
-        lengths = pack('>II', userlen, desclen)
+        lengths = pack('>2I', userlen, desclen)
         data = lengths + u + d + e
         self._pending.put(tid, ABORT, txn=txn)
         self._txnMetadata.put(tid, data, txn=txn)
@@ -478,6 +486,7 @@
     #
 
     def _dostore(self, txn, oid, serial, data, version):
+        data, refs = data
         conflictresolved = False
         vid = nvrevid = ovid = ZERO
         # Check for conflict errors.  JF says: under some circumstances,
@@ -527,10 +536,15 @@
                 # The non-version revid is the same as for the previous
                 # revision of the object.
                 nvrevid = onvrevid
-        # Now store optimistically data to all the tables
+        # Now optimistically store data to all the tables
         newserial = self._serial
         revid = oid + newserial
         self._serials.put(oid, newserial, txn=txn)
+        # Store object referents, but only if the list is non-empty
+        if refs:
+            referents = EMPTYSTRING.join(refs)
+            assert len(referents) % 8 == 0
+            self._referents.put(revid, referents, txn=txn)
         self._pickles.put(revid, data, txn=txn)
         self._metadata.put(revid, vid+nvrevid+newserial+oserial, txn=txn)
         self._txnoids.put(newserial, oid, txn=txn)
@@ -1212,7 +1226,7 @@
                 rec = c.prev()
                 if tid <= packtime:
                     break
-                userlen, desclen = unpack('>II', txnmeta[:8])
+                userlen, desclen = unpack('>2I', txnmeta[:8])
                 user = txnmeta[8:8+userlen]
                 desc = txnmeta[8+userlen:8+userlen+desclen]
                 ext = txnmeta[8+userlen+desclen:]
@@ -1398,14 +1412,14 @@
                 # with it again.  Otherwise, we can remove the metadata record
                 # for this revision and decref the corresponding pickle.
                 if oldserial <> ZERO:
+                    orevid = oid+oldserial
                     # It's possible this object revision has already been
                     # deleted, if the oid points to a decref'd away object
-                    try:
-                        metadata = self._metadata[oid+oldserial]
-                    except KeyError:
-                        pass
-                    else:
-                        self._metadata.delete(oid+oldserial, txn=txn)
+                    if self._metadata.has_key(orevid):
+                        metadata = self._metadata[orevid]
+                        self._metadata.delete(orevid, txn=txn)
+                        if self._referents.has_key(orevid):
+                            self._referents.delete(orevid, txn=txn)
                         # Decref the pickle
                         self._decrefPickle(oid, metadata[16:24], txn)
                     try:
@@ -1430,19 +1444,21 @@
         if lrevid == DNE:
             # There is no pickle data
             return
-        key = oid + lrevid
-        refcount = u64(self._pickleRefcounts.get(key, ZERO)) - 1
+        revid = oid + lrevid
+        refcount = u64(self._pickleRefcounts.get(revid, ZERO)) - 1
         assert refcount >= 0
         if refcount == 0:
             # We can collect this pickle
-            self._pickleRefcounts.delete(key, txn=txn)
-            data = self._pickles[key]
-            self._pickles.delete(key, txn=txn)
-            deltas = {}
-            self._update(deltas, data, -1)
-            self._decref(deltas, txn)
+            self._pickleRefcounts.delete(revid, txn=txn)
+            self._pickles.delete(revid, txn=txn)
+            # And decref all objects pointed to by this pickle
+            referents = self._referents.get(revid, txn=txn)
+            if referents:
+                deltas = {}
+                self._update(deltas, referents, -1)
+                self._decref(deltas, txn)
         else:
-            self._pickleRefcounts.put(key, p64(refcount), txn=txn)
+            self._pickleRefcounts.put(revid, p64(refcount), txn=txn)
 
     def _decref(self, deltas, txn):
         for oid, delta in deltas.items():
@@ -1576,9 +1592,11 @@
                     lrevid = self._metadata[oid+tid][16:24]
                     data = self._pickles[oid+lrevid]
                     # Now get the oids of all the objects referenced by this
-                    # pickle
-                    for oid in findrefs(data):
-                        self._oidqueue.append(oid, txn)
+                    # object revision
+                    referents = self._referents.get(oid+lrevid)
+                    if referents:
+                        for oid in self._splitoids(referents):
+                            self._oidqueue.append(oid, txn)
             # Pop the next oid off the queue and do it all again
             rec = self._oidqueue.consume(txn)
             oid = rec and rec[1]
@@ -1648,7 +1666,7 @@
                 packedp = True
             else:
                 packedp = False
-            userlen, desclen = unpack('>II', data[:8])
+            userlen, desclen = unpack('>2I', data[:8])
             user = data[8:8+userlen]
             desc = data[8+userlen:8+userlen+desclen]
             ext = data[8+userlen+desclen:]