[Zodb-checkins] SVN: ZODB/branches/tim-simpler_connection/src/ZODB/ Test auto-purging of the available-connection deque.

Tim Peters tim.one at comcast.net
Tue Oct 26 17:45:26 EDT 2004


Log message for revision 28260:
  Test auto-purging of the available-connection deque.
  
  This triggered some code changes.  When closing a connection,
  the meaning of "the pool is too big" is debatable:  it may mean
  that the number of all connections the pool knows about exceeds
  pool_size, or it may mean that the stack of available
  connections (a subset of all the connections) exceeds pool_size.
  
  The code was changed to mean the latter, primarily because it's
  predictable.  The total number of connections the pool knows
  about can change at any time, because it depends on which weak
  references DB holds that cyclic gc hasn't yet cleared.
  
  But it doesn't matter much either way, since clients are never
  "supposed to" exceed pool_size anyway.  I think it degrades
  more gracefully this way.
  
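  To make the chosen meaning concrete, here is a simplified standalone
  sketch of the trimming rule (the class and names below are illustrative
  only; the real pool tracks connections via DB's weak references and
  does more bookkeeping):

      class PoolSketch:
          # Illustration of the rule: only the stack of available (closed)
          # connections is bounded by pool_size; connections handed out to
          # clients are never discarded here.
          def __init__(self, pool_size):
              self.pool_size = pool_size
              self.all = set()        # every connection the pool knows about
              self.available = []     # closed connections, oldest first

          def _reduce_size(self, strictly_less=False):
              target = self.pool_size - bool(strictly_less)
              while len(self.available) > target:
                  self.all.remove(self.available.pop(0))

          def repush(self, c):
              # Closing a connection: make room first, then push, so the
              # stack never holds more than pool_size entries.
              self._reduce_size(strictly_less=True)
              self.available.append(c)

      # With pool_size=2, closing three connections keeps only the two
      # youngest on the available stack; the oldest is silently dropped.
      pool = PoolSketch(2)
      pool.all.update(['c1', 'c2', 'c3'])
      for c in ['c1', 'c2', 'c3']:
          pool.repush(c)
      assert pool.available == ['c2', 'c3']
      assert pool.all == set(['c2', 'c3'])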

Changed:
  U   ZODB/branches/tim-simpler_connection/src/ZODB/DB.py
  U   ZODB/branches/tim-simpler_connection/src/ZODB/tests/dbopen.txt

-=-
Modified: ZODB/branches/tim-simpler_connection/src/ZODB/DB.py
===================================================================
--- ZODB/branches/tim-simpler_connection/src/ZODB/DB.py	2004-10-26 20:40:08 UTC (rev 28259)
+++ ZODB/branches/tim-simpler_connection/src/ZODB/DB.py	2004-10-26 21:45:25 UTC (rev 28260)
@@ -49,7 +49,7 @@
 
     When a connection is explicitly closed, tell the pool via repush().
     That adds the connection to a stack of connections available for
-    reuse, and throws away the oldest stack entries if the pool is too large.
+    reuse, and throws away the oldest stack entries if the stack is too large.
     pop() pops this stack.
 
     When a connection is obtained via pop(), the pool holds only a weak
@@ -72,7 +72,8 @@
         # A stack of connections available to hand out.  This is a subset
         # of self.all.  push() and repush() add to this, and may remove
         # the oldest available connections if the pool is too large.
-        # pop() pops this stack.
+        # pop() pops this stack.  There are never more than pool_size entries
+        # in this stack.
         # In Python 2.4, a collections.deque would make more sense than
         # a list (we push only "on the right", but may pop from both ends).
         self.available = []
@@ -81,15 +82,16 @@
     # If the pool_size is smaller than the current value, this may discard
     # the oldest available connections.
     def set_pool_size(self, pool_size):
-        self.pool_size = pool_size + 1  # _reduce_size shoots for < pool_size
+        self.pool_size = pool_size
         self._reduce_size()
-        self.pool_size = pool_size
 
     # Register a new available connection.  We must not know about c already.
+    # c will be pushed onto the available stack even if we're over the
+    # pool size limit.
     def push(self, c):
         assert c not in self.all
         assert c not in self.available
-        self._reduce_size()
+        self._reduce_size(strictly_less=True)
         self.all.add(c)
         self.available.append(c)
         n, limit = len(self.all), self.pool_size
@@ -100,17 +102,21 @@
             reporter("DB.open() has %s open connections with a pool_size "
                      "of %s", n, limit)
 
-    # Reregister an available connection formerly obtained via pop().
+    # Reregister an available connection formerly obtained via pop().  This
+    # pushes it on the stack of available connections, and may discard
+    # older available connections.
     def repush(self, c):
         assert c in self.all
         assert c not in self.available
-        self._reduce_size()
+        self._reduce_size(strictly_less=True)
         self.available.append(c)
 
     # Throw away the oldest available connections until we're under our
-    # target size.  It may not be possible to achieve this.
-    def _reduce_size(self):
-        while self.available and len(self.all) >= self.pool_size:
+    # target size (strictly_less=True) or no more than that (strictly_less=
+    # False, the default).
+    def _reduce_size(self, strictly_less=False):
+        target = self.pool_size - bool(strictly_less)
+        while len(self.available) > target:
             c = self.available.pop(0)
             self.all.remove(c)
 

Modified: ZODB/branches/tim-simpler_connection/src/ZODB/tests/dbopen.txt
===================================================================
--- ZODB/branches/tim-simpler_connection/src/ZODB/tests/dbopen.txt	2004-10-26 20:40:08 UTC (rev 28259)
+++ ZODB/branches/tim-simpler_connection/src/ZODB/tests/dbopen.txt	2004-10-26 21:45:25 UTC (rev 28260)
@@ -17,7 +17,7 @@
 >>> from ZODB import DB
 >>> from ZODB.MappingStorage import MappingStorage as Storage
 
-Capturing log messages from DB is important for part of the examples:
+Capturing log messages from DB is important for some of the examples:
 
 >>> from zope.testing.loggingsupport import InstalledHandler
 >>> handler = InstalledHandler('ZODB.DB')
@@ -71,7 +71,7 @@
 >>> handler.clear()
 >>> st.close()
 >>> st = Storage()
->>> PS = 2
+>>> PS = 2  # smaller pool size
 >>> db = DB(st, pool_size=PS)
 >>> conns = [db.open() for dummy in range(PS)]
 >>> handler.records
@@ -97,7 +97,7 @@
 >>> print msg.name, msg.levelname, msg.getMessage()
 ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
 
-And a critical for going beyond that:
+And critical for going beyond that:
 
 >>> conns.append(db.open())
 >>> len(conns)
@@ -112,12 +112,9 @@
 
 >>> handler.clear()
 >>> db.setPoolSize(6)
->>> conns.append(db.open)
+>>> conns.append(db.open())
 >>> handler.records  # no log msg -- the pool is bigger now
 []
->>> conns.append(db.open()) # likewise
->>> handler.records
-[]
 >>> conns.append(db.open()) # but one more and there's a warning again
 >>> len(handler.records)
 1
@@ -194,7 +191,7 @@
 that are still alive.
 
 
->>> len(db.cacheDetailSize())
+>>> len(db.cacheDetailSize())  # one result for each connection's cache
 3
 
 If a connection object is abandoned (it becomes unreachable), then it
@@ -207,7 +204,7 @@
 >>> len(pool.all)
 3
 >>> c3 = None
->>> dummy = gc.collect()
+>>> dummy = gc.collect()  # removes c3 from pool.all
 >>> len(pool.all)
 2
 
@@ -217,5 +214,74 @@
 >>> len(pool.available)
 0
 
+Nothing in that last block should have logged any msgs:
+
+>>> handler.records
+[]
+
+If "too many" connections are open, then closing one may kick an older
+closed one out of the available connection stack.
+
 >>> st.close()
+>>> st = Storage()
+>>> db = DB(st, pool_size=3)
+>>> conns = [db.open() for dummy in range(6)]
+>>> len(handler.records)  # 3 warnings for the "excess" connections
+3
+>>> pool = db._pools['']
+>>> len(pool.available), len(pool.all)
+(0, 6)
+
+Let's mark them:
+
+>>> for i, c in enumerate(conns):
+...     c.MARKER = i
+
+Closing connections adds them to the stack:
+
+>>> for i in range(3):
+...     conns[i].close()
+>>> len(pool.available), len(pool.all)
+(3, 6)
+>>> del conns[:3]  # leave the ones with MARKERs 3, 4 and 5
+
+Closing another one will purge the one with MARKER 0 from the stack
+(since it was the first added to the stack):
+
+>>> [c.MARKER for c in pool.available]
+[0, 1, 2]
+>>> conns[0].close()  # MARKER 3
+>>> len(pool.available), len(pool.all)
+(3, 5)
+>>> [c.MARKER for c in pool.available]
+[1, 2, 3]
+
+Similarly for the other two:
+
+>>> conns[1].close(); conns[2].close()
+>>> len(pool.available), len(pool.all)
+(3, 3)
+>>> [c.MARKER for c in pool.available]
+[3, 4, 5]
+
+Reducing the pool size may also purge the oldest closed connections:
+
+>>> db.setPoolSize(2)  # gets rid of MARKER 3
+>>> len(pool.available), len(pool.all)
+(2, 2)
+>>> [c.MARKER for c in pool.available]
+[4, 5]
+
+Since MARKER 5 is still the last one added to the stack, it will be the
+first popped:
+
+>>> c1 = db.open(); c2 = db.open()
+>>> c1.MARKER, c2.MARKER
+(5, 4)
+>>> len(pool.available), len(pool.all)
+(0, 2)
+
+Clean up.
+
+>>> st.close()
 >>> handler.uninstall()


