[Zodb-checkins] CVS: Zope3/src/zodb/storage - bdbfull.py:1.19

Barry Warsaw barry@wooz.org
Mon, 17 Mar 2003 15:22:40 -0500


Update of /cvs-repository/Zope3/src/zodb/storage
In directory cvs.zope.org:/tmp/cvs-serv24092

Modified Files:
	bdbfull.py 
Log Message:
pack(), autopack(): Simplify the API, since these are essentially the
same method differing only in the gc flag argument.

_dopack(): gc flag isn't optional

"classicpack" -> "gcpack"


=== Zope3/src/zodb/storage/bdbfull.py 1.18 => 1.19 ===
--- Zope3/src/zodb/storage/bdbfull.py:1.18	Fri Mar 14 15:44:50 2003
+++ Zope3/src/zodb/storage/bdbfull.py	Mon Mar 17 15:22:39 2003
@@ -297,7 +297,7 @@
         lastpacktime = u64(self._last_packtime())
         return _Autopack(
             self, event,
-            config.frequency, config.packtime, config.classicpack,
+            config.frequency, config.packtime, config.gcpack,
             lastpacktime)
 
     def _doabort(self, txn, tid):
@@ -1293,36 +1293,27 @@
             last = abs(last)
         return self._withlock(self._doundolog, first, last, filter)
 
-    # Packing
-    #
-    # There are two types of pack operations, the classic pack and the
-    # autopack.  Autopack's primary job is to periodically delete non-current
-    # object revisions.  It runs in a thread and has an `autopack time' which
-    # is essentially just a time in the past at which to autopack to.  For
-    # example, you might set up autopack to run once per hour, packing away
-    # all revisions that are older than 4 hours.  Autopack can also be
-    # configured to periodically do a classic pack.
     #
-    # Classic pack is like autopack -- it packs away old revisions -- but it
-    # also does a mark and sweep through all the known objects, looking for
-    # those that are not root reachable as of the pack time.  Such objects are
-    # also packed away even if they have current revisions in the packable
-    # transactions, because it means that there is no undo operation that can
-    # restore the object's reachability.  Remember that you cannot undo
-    # previous to the latest pack time.
-    #
-    # Both packing strategies do reference counting, and the refcounts are
-    # sums of the refcounts of all revisions, so if an object's refcount goes
-    # to zero, all its object revisions can safely be packed away.
+    # Packing
     #
-    # We try to interleave BerkeleyDB transactions and non-pack-lock
-    # acquisition as granularly as possible so that packing doesn't block
-    # other operations for too long.  But remember we don't use Berkeley locks
-    # so we have to be careful about our application level locks.
-
-    # First, the public API for classic pack
-    def pack(self, t):
-        self.log('classic pack started')
+
+    def pack(self, t, gc=True):
+        """Perform a pack on the storage.
+
+        There are two forms of packing: incremental and full gc.  In an
+        incremental pack, only old object revisions are removed.  In a full gc
+        pack, cyclic garbage detection and removal is also performed.
+
+        t is the pack time.  All non-current object revisions older than t
+        will be removed in an incremental pack.
+
+        pack() always performs an incremental pack.  If the gc flag is True,
+        then pack() will also perform a garbage collection.  Some storages
+        (e.g. FileStorage) always do both phases in a pack() call.  Such
+        storages should simply ignore the gc flag.
+        """
+        self.log('pack started (packtime: %s, gc? %s)', t,
+                 (gc and 'yes' or 'no'))
         # A simple wrapper around the bulk of packing, but which acquires a
         # lock that prevents multiple packs from running at the same time.
         self._packlock.acquire()
@@ -1332,13 +1323,13 @@
             # operation across several Berkeley transactions, which allows
             # other work to happen (stores and reads) while packing is being
             # done.
-            self._dopack(t)
+            self._dopack(t, gc)
         finally:
             self._packing = False
             self._packlock.release()
-        self.log('classic pack finished')
+        self.log('pack finished')
 
-    def _dopack(self, t, gc=True):
+    def _dopack(self, t, gc):
         # t is a TimeTime, or time float, convert this to a TimeStamp object,
         # using an algorithm similar to what's used in FileStorage.  We know
         # that our transaction ids, a.k.a. revision ids, are timestamps.
@@ -1388,29 +1379,6 @@
         finally:
             self._lock_release()
 
-    def autopack(self, t, gc=False):
-        """Perform an autopack pass.
-
-        Autopacking is different than classic pack() in that it doesn't do
-        cyclic garbage detection unless the gc flag is True.
-        """
-        self.log('autopack started (packtime: %s, gc? %s)', t,
-                 (gc and 'yes' or 'no'))
-        # A simple wrapper around the bulk of packing, but which acquires a
-        # lock that prevents multiple packs from running at the same time.
-        self._packlock.acquire()
-        self._packing = True
-        try:
-            # We don't wrap this in _withtxn() because we're going to do the
-            # operation across several Berkeley transactions, which allows
-            # other work to happen (stores and reads) while packing is being
-            # done.
-            self._dopack(t, gc)
-        finally:
-            self._packing = False
-            self._packlock.release()
-        self.log('autopack finished')
-
     def _collect_revs(self, txn, packtid):
         ct = co = None
         try:
@@ -1870,21 +1838,21 @@
     NAME = 'autopacking'
 
     def __init__(self, storage, event,
-                 frequency, packtime, classicpack,
+                 frequency, packtime, gcpack,
                  lastpacktime):
         _WorkThread.__init__(self, storage, event, frequency)
         self._packtime = packtime
-        self._classicpack = classicpack
+        self._gcpack = gcpack
         # Bookkeeping
-        self._lastclassic = 0
+        self._lastgc = 0
 
     def _dowork(self):
-        # Should we do a classic pack this time?
-        if self._classicpack <= 0:
-            classicp = False
+        # Should we do a full gc pack this time?
+        if self._gcpack <= 0:
+            dofullgc = False
         else:
-            v = (self._lastclassic + 1) % self._classicpack
-            self._lastclassic = v
-            classicp = not v
-        # Run the autopack phase
-        self._storage.autopack(time.time() - self._packtime, classicp)
+            v = (self._lastgc + 1) % self._gcpack
+            self._lastgc = v
+            dofullgc = not v
+        # Run the full gc phase
+        self._storage.pack(time.time() - self._packtime, dofullgc)