[Zodb-checkins] SVN: ZODB/trunk/src/ Merge ZODB trunk changes checked in from the wrong project.

Tim Peters tim.one at comcast.net
Thu Mar 3 12:02:25 EST 2005


Log message for revision 29391:
  Merge ZODB trunk changes checked in from the wrong project.
  
  r29290 | frerich | 2005-02-24 17:36:00 -0500 (Thu, 24 Feb 2005)
  Changed paths:
     M /Zope3/trunk/src/ZODB/tests/dbopen.txt
     ...
     minor editing
  
  r29247 | gintautasm | 2005-02-22 06:40:26 -0500 (Tue, 22 Feb 2005)
  Changed paths:
     M /Zope3/trunk/src/BTrees/Interfaces.py
     ...
     More minor nitpicks.  This should be the last one.
  
  

Changed:
  U   ZODB/trunk/src/BTrees/Interfaces.py
  U   ZODB/trunk/src/ZODB/tests/dbopen.txt

-=-
Modified: ZODB/trunk/src/BTrees/Interfaces.py
===================================================================
--- ZODB/trunk/src/BTrees/Interfaces.py	2005-03-03 15:42:54 UTC (rev 29390)
+++ ZODB/trunk/src/BTrees/Interfaces.py	2005-03-03 17:02:23 UTC (rev 29391)
@@ -14,6 +14,7 @@
 
 from zope.interface import Interface
 
+
 class ICollection(Interface):
 
     def clear():
@@ -42,6 +43,7 @@
         to, but not including, index2.
         """
 
+
 class IKeyed(ICollection):
 
     def has_key(key):
@@ -76,6 +78,7 @@
         greater than or equal to the argument.
         """
 
+
 class ISetMutable(IKeyed):
 
     def insert(key):
@@ -88,29 +91,34 @@
         """Remove the key from the set."""
 
     def update(seq):
-        """Add the items from the given sequence to the set"""
+        """Add the items from the given sequence to the set."""
 
+
 class ISized(Interface):
-    "anything supporting __len"
+    """An object that supports __len__."""
 
     def __len__():
-        """Return the number of items in the container"""
+        """Return the number of items in the container."""
 
+
 class IKeySequence(IKeyed, ISized):
 
     def __getitem__(index):
-        """Return the key in the given index position
+        """Return the key in the given index position.
 
         This allows iteration with for loops and use in functions,
         like map and list, that read sequences.
         """
 
+
 class ISet(IKeySequence, ISetMutable):
     pass
 
+
 class ITreeSet(IKeyed, ISetMutable):
     pass
 
+
 class IMinimalDictionary(ISized):
 
     def has_key(key):
@@ -205,6 +213,7 @@
         integer values, the normalization is division.
         """
 
+
 class IBTree(IDictionaryIsh):
 
     def insert(key, value):
@@ -226,6 +235,7 @@
               key=generate_key()
         """
 
+
 class IMerge(Interface):
     """Object with methods for merging sets, buckets, and trees.
 
@@ -275,6 +285,7 @@
         collections.
         """
 
+
 class IIMerge(IMerge):
     """Merge collections with integer value type.
 
@@ -347,6 +358,7 @@
         Note that c1 and c2 must be collections.
         """
 
+
 class IMergeIntegerKey(IMerge):
     """IMerge-able objects with integer keys.
 

Modified: ZODB/trunk/src/ZODB/tests/dbopen.txt
===================================================================
--- ZODB/trunk/src/ZODB/tests/dbopen.txt	2005-03-03 15:42:54 UTC (rev 29390)
+++ ZODB/trunk/src/ZODB/tests/dbopen.txt	2005-03-03 17:02:23 UTC (rev 29391)
@@ -1,186 +1,177 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
+=====================
+Connection Management
+=====================
 
+
 Here we exercise the connection management done by the DB class.
 
->>> from ZODB import DB
->>> from ZODB.MappingStorage import MappingStorage as Storage
+    >>> from ZODB import DB
+    >>> from ZODB.MappingStorage import MappingStorage as Storage
 
 Capturing log messages from DB is important for some of the examples:
 
->>> from zope.testing.loggingsupport import InstalledHandler
->>> handler = InstalledHandler('ZODB.DB')
+    >>> from zope.testing.loggingsupport import InstalledHandler
+    >>> handler = InstalledHandler('ZODB.DB')
 
 Create a storage, and wrap it in a DB wrapper:
 
->>> st = Storage()
->>> db = DB(st)
+    >>> st = Storage()
+    >>> db = DB(st)
 
 By default, we can open 7 connections without any log messages:
 
->>> conns = [db.open() for dummy in range(7)]
->>> handler.records
-[]
+    >>> conns = [db.open() for dummy in range(7)]
+    >>> handler.records
+    []
 
 Open one more, and we get a warning:
 
->>> conns.append(db.open())
->>> len(handler.records)
-1
->>> msg = handler.records[0]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB WARNING DB.open() has 8 open connections with a pool_size of 7
+    >>> conns.append(db.open())
+    >>> len(handler.records)
+    1
+    >>> msg = handler.records[0]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB WARNING DB.open() has 8 open connections with a pool_size of 7
 
 Open 6 more, and we get 6 more warnings:
 
->>> conns.extend([db.open() for dummy in range(6)])
->>> len(conns)
-14
->>> len(handler.records)
-7
->>> msg = handler.records[-1]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB WARNING DB.open() has 14 open connections with a pool_size of 7
+    >>> conns.extend([db.open() for dummy in range(6)])
+    >>> len(conns)
+    14
+    >>> len(handler.records)
+    7
+    >>> msg = handler.records[-1]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB WARNING DB.open() has 14 open connections with a pool_size of 7
 
 Add another, so that it's more than twice the default, and the level
 rises to critical:
 
->>> conns.append(db.open())
->>> len(conns)
-15
->>> len(handler.records)
-8
->>> msg = handler.records[-1]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB CRITICAL DB.open() has 15 open connections with a pool_size of 7
+    >>> conns.append(db.open())
+    >>> len(conns)
+    15
+    >>> len(handler.records)
+    8
+    >>> msg = handler.records[-1]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB CRITICAL DB.open() has 15 open connections with a pool_size of 7
 
 While it's boring, it's important to verify that the same relationships
 hold if the default pool size is overridden.
 
->>> handler.clear()
->>> st.close()
->>> st = Storage()
->>> PS = 2 # smaller pool size
->>> db = DB(st, pool_size=PS)
->>> conns = [db.open() for dummy in range(PS)]
->>> handler.records
-[]
+    >>> handler.clear()
+    >>> st.close()
+    >>> st = Storage()
+    >>> PS = 2 # smaller pool size
+    >>> db = DB(st, pool_size=PS)
+    >>> conns = [db.open() for dummy in range(PS)]
+    >>> handler.records
+    []
 
 A warning for opening one more:
 
->>> conns.append(db.open())
->>> len(handler.records)
-1
->>> msg = handler.records[0]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB WARNING DB.open() has 3 open connections with a pool_size of 2
+    >>> conns.append(db.open())
+    >>> len(handler.records)
+    1
+    >>> msg = handler.records[0]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB WARNING DB.open() has 3 open connections with a pool_size of 2
 
 More warnings through 4 connections:
 
->>> conns.extend([db.open() for dummy in range(PS-1)])
->>> len(conns)
-4
->>> len(handler.records)
-2
->>> msg = handler.records[-1]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
+    >>> conns.extend([db.open() for dummy in range(PS-1)])
+    >>> len(conns)
+    4
+    >>> len(handler.records)
+    2
+    >>> msg = handler.records[-1]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
 
 And critical for going beyond that:
 
->>> conns.append(db.open())
->>> len(conns)
-5
->>> len(handler.records)
-3
->>> msg = handler.records[-1]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB CRITICAL DB.open() has 5 open connections with a pool_size of 2
+    >>> conns.append(db.open())
+    >>> len(conns)
+    5
+    >>> len(handler.records)
+    3
+    >>> msg = handler.records[-1]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB CRITICAL DB.open() has 5 open connections with a pool_size of 2
 
 We can change the pool size on the fly:
 
->>> handler.clear()
->>> db.setPoolSize(6)
->>> conns.append(db.open())
->>> handler.records  # no log msg -- the pool is bigger now
-[]
->>> conns.append(db.open()) # but one more and there's a warning again
->>> len(handler.records)
-1
->>> msg = handler.records[0]
->>> print msg.name, msg.levelname, msg.getMessage()
-ZODB.DB WARNING DB.open() has 7 open connections with a pool_size of 6
+    >>> handler.clear()
+    >>> db.setPoolSize(6)
+    >>> conns.append(db.open())
+    >>> handler.records  # no log msg -- the pool is bigger now
+    []
+    >>> conns.append(db.open()) # but one more and there's a warning again
+    >>> len(handler.records)
+    1
+    >>> msg = handler.records[0]
+    >>> print msg.name, msg.levelname, msg.getMessage()
+    ZODB.DB WARNING DB.open() has 7 open connections with a pool_size of 6
 
 Enough of that.
 
->>> handler.clear()
->>> st.close()
+    >>> handler.clear()
+    >>> st.close()
 
 More interesting is the stack-like nature of connection reuse.  So long as
 we keep opening new connections, and keep them alive, all connections
 returned are distinct:
 
->>> st = Storage()
->>> db = DB(st)
->>> c1 = db.open()
->>> c2 = db.open()
->>> c3 = db.open()
->>> c1 is c2 or c1 is c3 or c2 is c3
-False
+    >>> st = Storage()
+    >>> db = DB(st)
+    >>> c1 = db.open()
+    >>> c2 = db.open()
+    >>> c3 = db.open()
+    >>> c1 is c2 or c1 is c3 or c2 is c3
+    False
 
 Let's put some markers on the connections, so we can identify these
 specific objects later:
 
->>> c1.MARKER = 'c1'
->>> c2.MARKER = 'c2'
->>> c3.MARKER = 'c3'
+    >>> c1.MARKER = 'c1'
+    >>> c2.MARKER = 'c2'
+    >>> c3.MARKER = 'c3'
 
 Now explicitly close c1 and c2:
 
->>> c1.close()
->>> c2.close()
+    >>> c1.close()
+    >>> c2.close()
 
 Reaching into the internals, we can see that db's connection pool now has
 two connections available for reuse, and knows about three connections in
 all:
 
->>> pool = db._pools['']
->>> len(pool.available)
-2
->>> len(pool.all)
-3
+    >>> pool = db._pools['']
+    >>> len(pool.available)
+    2
+    >>> len(pool.all)
+    3
 
 Since we closed c2 last, it's at the top of the available stack, so will
 be reused by the next open():
 
->>> c1 = db.open()
->>> c1.MARKER
-'c2'
->>> len(pool.available), len(pool.all)
-(1, 3)
+    >>> c1 = db.open()
+    >>> c1.MARKER
+    'c2'
+    >>> len(pool.available), len(pool.all)
+    (1, 3)
 
->>> c3.close()  # now the stack has c3 on top, then c1
->>> c2 = db.open()
->>> c2.MARKER
-'c3'
->>> len(pool.available), len(pool.all)
-(1, 3)
->>> c3 = db.open()
->>> c3.MARKER
-'c1'
->>> len(pool.available), len(pool.all)
-(0, 3)
+    >>> c3.close()  # now the stack has c3 on top, then c1
+    >>> c2 = db.open()
+    >>> c2.MARKER
+    'c3'
+    >>> len(pool.available), len(pool.all)
+    (1, 3)
+    >>> c3 = db.open()
+    >>> c3.MARKER
+    'c1'
+    >>> len(pool.available), len(pool.all)
+    (0, 3)
 
 What about the 3 in pool.all?  We've seen that closing connections doesn't
 reduce pool.all, and it would be bad if DB kept connections alive forever.
@@ -191,97 +182,97 @@
 that are still alive.
 
 
->>> len(db.cacheDetailSize())  # one result for each connection's cache
-3
+    >>> len(db.cacheDetailSize())  # one result for each connection's cache
+    3
 
 If a connection object is abandoned (it becomes unreachable), then it
 will vanish from pool.all automatically.  However, connections are
 involved in cycles, so exactly when a connection vanishes from pool.all
 isn't predictable.  It can be forced by running gc.collect():
 
->>> import gc
->>> dummy = gc.collect()
->>> len(pool.all)
-3
->>> c3 = None
->>> dummy = gc.collect()  # removes c3 from pool.all
->>> len(pool.all)
-2
+    >>> import gc
+    >>> dummy = gc.collect()
+    >>> len(pool.all)
+    3
+    >>> c3 = None
+    >>> dummy = gc.collect()  # removes c3 from pool.all
+    >>> len(pool.all)
+    2
 
 Note that c3 is really gone; in particular it didn't get added back to
 the stack of available connections by magic:
 
->>> len(pool.available)
-0
+    >>> len(pool.available)
+    0
 
 Nothing in that last block should have logged any msgs:
 
->>> handler.records
-[]
+    >>> handler.records
+    []
 
 If "too many" connections are open, then closing one may kick an older
 closed one out of the available connection stack.
 
->>> st.close()
->>> st = Storage()
->>> db = DB(st, pool_size=3)
->>> conns = [db.open() for dummy in range(6)]
->>> len(handler.records)  # 3 warnings for the "excess" connections
-3
->>> pool = db._pools['']
->>> len(pool.available), len(pool.all)
-(0, 6)
+    >>> st.close()
+    >>> st = Storage()
+    >>> db = DB(st, pool_size=3)
+    >>> conns = [db.open() for dummy in range(6)]
+    >>> len(handler.records)  # 3 warnings for the "excess" connections
+    3
+    >>> pool = db._pools['']
+    >>> len(pool.available), len(pool.all)
+    (0, 6)
 
 Let's mark them:
 
->>> for i, c in enumerate(conns):
-...     c.MARKER = i
+    >>> for i, c in enumerate(conns):
+    ...     c.MARKER = i
 
 Closing connections adds them to the stack:
 
->>> for i in range(3):
-...     conns[i].close()
->>> len(pool.available), len(pool.all)
-(3, 6)
->>> del conns[:3]  # leave the ones with MARKERs 3, 4 and 5
+    >>> for i in range(3):
+    ...     conns[i].close()
+    >>> len(pool.available), len(pool.all)
+    (3, 6)
+    >>> del conns[:3]  # leave the ones with MARKERs 3, 4 and 5
 
 Closing another one will purge the one with MARKER 0 from the stack
 (since it was the first added to the stack):
 
->>> [c.MARKER for c in pool.available]
-[0, 1, 2]
->>> conns[0].close()  # MARKER 3
->>> len(pool.available), len(pool.all)
-(3, 5)
->>> [c.MARKER for c in pool.available]
-[1, 2, 3]
+    >>> [c.MARKER for c in pool.available]
+    [0, 1, 2]
+    >>> conns[0].close()  # MARKER 3
+    >>> len(pool.available), len(pool.all)
+    (3, 5)
+    >>> [c.MARKER for c in pool.available]
+    [1, 2, 3]
 
 Similarly for the other two:
 
->>> conns[1].close(); conns[2].close()
->>> len(pool.available), len(pool.all)
-(3, 3)
->>> [c.MARKER for c in pool.available]
-[3, 4, 5]
+    >>> conns[1].close(); conns[2].close()
+    >>> len(pool.available), len(pool.all)
+    (3, 3)
+    >>> [c.MARKER for c in pool.available]
+    [3, 4, 5]
 
 Reducing the pool size may also purge the oldest closed connections:
 
->>> db.setPoolSize(2)  # gets rid of MARKER 3
->>> len(pool.available), len(pool.all)
-(2, 2)
->>> [c.MARKER for c in pool.available]
-[4, 5]
+    >>> db.setPoolSize(2)  # gets rid of MARKER 3
+    >>> len(pool.available), len(pool.all)
+    (2, 2)
+    >>> [c.MARKER for c in pool.available]
+    [4, 5]
 
 Since MARKER 5 is still the last one added to the stack, it will be the
 first popped:
 
->>> c1 = db.open(); c2 = db.open()
->>> c1.MARKER, c2.MARKER
-(5, 4)
->>> len(pool.available), len(pool.all)
-(0, 2)
+    >>> c1 = db.open(); c2 = db.open()
+    >>> c1.MARKER, c2.MARKER
+    (5, 4)
+    >>> len(pool.available), len(pool.all)
+    (0, 2)
 
 Clean up.
 
->>> st.close()
->>> handler.uninstall()
+    >>> st.close()
+    >>> handler.uninstall()



More information about the Zodb-checkins mailing list