[Zope-Checkins] CVS: ZODB3/ZODB - Connection.py:1.98.4.2 DB.py:1.53.2.2 FileStorage.py:1.135.6.3 POSException.py:1.20.4.1 TmpStore.py:1.10.10.1 Transaction.py:1.49.4.1 fspack.py:1.8.8.4 fsrecover.py:1.12.6.1 transact.py:1.2.4.1 utils.py:1.17.4.1

Tim Peters tim.one at comcast.net
Mon Sep 15 17:27:28 EDT 2003


Update of /cvs-repository/ZODB3/ZODB
In directory cvs.zope.org:/tmp/cvs-serv17778/ZODB

Modified Files:
      Tag: Zope-2_7-branch
	Connection.py DB.py FileStorage.py POSException.py TmpStore.py 
	Transaction.py fspack.py fsrecover.py transact.py utils.py 
Log Message:
Whitespace normalization (via Python's reindent.py script).

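(For context: reindent.py ships with the Python distribution as Tools/scripts/reindent.py, and was presumably run recursively over this tree with something like "python reindent.py -r ZODB3/ZODB".  The effect visible in the diffs below is limited to stripping trailing whitespace and trailing blank lines; the snippet that follows is only an illustrative sketch of that part of the normalization, not the actual script, which also rewrites indentation to 4-space levels.)

    # Illustrative sketch only -- not reindent.py itself.
    def normalize_whitespace(text):
        # Strip trailing whitespace from every line.
        lines = [line.rstrip() for line in text.splitlines()]
        # Drop blank lines at the end of the file.
        while lines and not lines[-1]:
            lines.pop()
        # End the file with exactly one newline.
        return "\n".join(lines) + "\n"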

=== ZODB3/ZODB/Connection.py 1.98.4.1 => 1.98.4.2 ===
--- ZODB3/ZODB/Connection.py:1.98.4.1	Mon Sep 15 14:02:58 2003
+++ ZODB3/ZODB/Connection.py	Mon Sep 15 17:26:56 2003
@@ -308,12 +308,12 @@
                 method_name, args, kw = self.__onCommitActions.pop(0)
                 getattr(self, method_name)(transaction, *args, **kw)
             return
-        
+
         oid = object._p_oid
         if self._conflicts.has_key(oid):
             self.getTransaction().register(object)
             raise ReadConflictError(object=object)
-        
+
         invalid = self._invalid
         if oid is None or object._p_jar is not self:
             # new object
@@ -625,7 +625,7 @@
         else:
             self.getTransaction().register(obj)
             raise ReadConflictError(object=obj)
-        
+
     def oldstate(self, object, serial):
         oid=object._p_oid
         p = self._storage.loadSerial(oid, serial)
@@ -717,7 +717,7 @@
         # the connection does not match what is written to the
         # database.  Invalidate the object here to guarantee that
         # the new state is read the next time the object is used.
-        
+
         if not store_return:
             return
         if isinstance(store_return, StringType):
@@ -757,7 +757,7 @@
             def callback():
                 d = {}
                 for oid in self._modified:
-                    d[oid] = 1 
+                    d[oid] = 1
                 self._db.invalidate(d, self)
             self._storage.tpc_finish(transaction, callback)
 


=== ZODB3/ZODB/DB.py 1.53.2.1 => 1.53.2.2 ===
--- ZODB3/ZODB/DB.py:1.53.2.1	Mon Sep 15 14:02:58 2003
+++ ZODB3/ZODB/DB.py	Mon Sep 15 17:26:56 2003
@@ -145,9 +145,9 @@
                 # We need to break circular refs to make it really go.
                 # XXX What objects are involved in the cycle?
                 connection.__dict__.clear()
-                
+
                 return
-                
+
             pool.append(connection)
             if len(pool)==1:
                 # Pool now usable again, unlock it.


=== ZODB3/ZODB/FileStorage.py 1.135.6.2 => 1.135.6.3 ===
--- ZODB3/ZODB/FileStorage.py:1.135.6.2	Mon Sep 15 14:02:58 2003
+++ ZODB3/ZODB/FileStorage.py	Mon Sep 15 17:26:56 2003
@@ -293,7 +293,7 @@
         self._records_before_save = max(self._records_before_save,
                                         len(self._index))
         self._ltid = tid
-        
+
         # self._pos should always point just past the last
         # transaction.  During 2PC, data is written after _pos.
         # invariant is restored at tpc_abort() or tpc_finish().
@@ -607,7 +607,7 @@
                 self._file.seek(u64(pnv))
                 h_pnv = self._file.read(DATA_VERSION_HDR_LEN)
                 newserial = h_pnv[8:16]
-            
+
             if self._index.get(oid) == srcpos:
                 # This is a current record!
                 self._tindex[oid] = here
@@ -981,7 +981,7 @@
             else:
                 warn("restore could not find previous non-version data "
                      "at %d or %d" % (prev, bp))
-            
+
         return pnv
 
     def supportsUndo(self):
@@ -1075,7 +1075,7 @@
             if fsync is not None: fsync(file.fileno())
 
             self._pos = nextpos
-            
+
             self._index.update(self._tindex)
             self._vindex.update(self._tvindex)
             self._oid2serial.update(self._toid2serial)
@@ -1084,16 +1084,16 @@
                     del self._oid2serial[oid]
                 except KeyError:
                     pass
-            
+
             # Update the number of records that we've written
             # +1 for the transaction record
-            self._records_written += len(self._tindex) + 1 
+            self._records_written += len(self._tindex) + 1
             if self._records_written >= self._records_before_save:
                 self._save_index()
                 self._records_written = 0
                 self._records_before_save = max(self._records_before_save,
                                                 len(self._index))
-                
+
         self._ltid = tid
 
     def _abort(self):
@@ -1531,14 +1531,14 @@
         """
         if self._is_read_only:
             raise POSException.ReadOnlyError()
-        
+
         stop=`TimeStamp(*time.gmtime(t)[:5]+(t%60,))`
         if stop==z64: raise FileStorageError, 'Invalid pack time'
 
         # If the storage is empty, there's nothing to do.
         if not self._index:
             return
-        
+
         # Record pack time so we don't undo while packing
         self._lock_acquire()
         try:


=== ZODB3/ZODB/POSException.py 1.20 => 1.20.4.1 ===
--- ZODB3/ZODB/POSException.py:1.20	Tue Jun 10 11:46:31 2003
+++ ZODB3/ZODB/POSException.py	Mon Sep 15 17:26:56 2003
@@ -173,7 +173,7 @@
 
 class MultipleUndoErrors(UndoError):
     """Several undo errors occured during a single transaction."""
-    
+
     def __init__(self, errs):
         # provide a reason and oid for clients that only look at that
         UndoError.__init__(self, *errs[0])


=== ZODB3/ZODB/TmpStore.py 1.10 => 1.10.10.1 ===
--- ZODB3/ZODB/TmpStore.py:1.10	Tue Apr  8 14:48:22 2003
+++ ZODB3/ZODB/TmpStore.py	Mon Sep 15 17:26:56 2003
@@ -42,7 +42,7 @@
 
     def getName(self):
         return self._db.getName()
-    
+
     def getSize(self):
         return self._pos
 


=== ZODB3/ZODB/Transaction.py 1.49 => 1.49.4.1 ===
--- ZODB3/ZODB/Transaction.py:1.49	Tue Jun 10 11:46:31 2003
+++ ZODB3/ZODB/Transaction.py	Mon Sep 15 17:26:56 2003
@@ -43,7 +43,7 @@
     except:
         LOG("TM", WARNING, "jar missing sortKey() method: %s" % j2)
         k2 = id(j2)
-        
+
     return cmp(k1, k2)
 
 class Transaction:
@@ -271,12 +271,12 @@
 
     def _get_jars(self, objects, subtransaction):
         # Returns a list of jars for this transaction.
-        
+
         # Find all the jars and sort them in a globally consistent order.
         # objects is a list of persistent objects and jars.
         # If this is a subtransaction and a jar is not subtransaction aware,
         # it's object gets delayed until the parent transaction commits.
-        
+
         d = {}
         for o in objects:
             jar = getattr(o, '_p_jar', o)
@@ -298,7 +298,7 @@
                     if self._non_st_objects is None:
                         self._non_st_objects = []
                     self._non_st_objects.append(o)
-                
+
         jars = d.values()
         jars.sort(jar_cmp)
 
@@ -406,7 +406,7 @@
 
         # After the tpc_abort(), call abort_sub() on all the
         # subtrans-aware jars to *really* abort the subtransaction.
-        
+
         # Example: For Connection(), the tpc_abort() will abort the
         # subtransaction TmpStore() and abort_sub() will remove the
         # TmpStore.


=== ZODB3/ZODB/fspack.py 1.8.8.3 => 1.8.8.4 ===
--- ZODB3/ZODB/fspack.py:1.8.8.3	Mon Sep 15 14:02:57 2003
+++ ZODB3/ZODB/fspack.py	Mon Sep 15 17:26:56 2003
@@ -192,7 +192,7 @@
             if dh.plen:
                 self.fail(pos, "data record has back pointer and data")
 
-def DataHeaderFromString(s):            
+def DataHeaderFromString(s):
     return DataHeader(*struct.unpack(DATA_HDR, s))
 
 class DataHeader:
@@ -338,7 +338,7 @@
                 return pos
             pos += h.recordlen()
         return 0
-    
+
     def _restore_pnv(self, oid, prev, version, bp):
         # Find a valid pnv (previous non-version) pointer for this version.
 
@@ -415,7 +415,7 @@
                 self._tfile.write(z64)
         else:
             self._tfile.write(data)
-            
+
 class GC(FileStorageFormatter):
 
     def __init__(self, file, eof, packtime):
@@ -437,7 +437,7 @@
         # second is a dictionary mapping objects to lists of
         # positions; it is used to handle the same number of objects
         # for which we must keep multiple revisions.
-        
+
         self.reachable = fsIndex()
         self.reach_ex = {}
 
@@ -460,7 +460,7 @@
         self.findReachableFromFuture()
         # These mappings are no longer needed and may consume a lot
         # of space.
-        del self.oid2verpos 
+        del self.oid2verpos
         del self.oid2curpos
 
     def buildPackIndex(self):
@@ -528,7 +528,7 @@
         # non-current revision could refer to objects that were
         # otherwise unreachable at the packtime.
         extra_roots = []
-        
+
         pos = self.packpos
         while pos < self.eof:
             th = self._read_txn_header(pos)
@@ -558,7 +558,7 @@
                             extra_roots.append(dh.pnv)
                     else:
                         self.reachable[dh.oid] = dh.back
-                        
+
                 pos += dh.recordlen()
 
             tlen = self._read_num(pos)
@@ -631,7 +631,7 @@
         self._file.seek(0, 2)
         self.file_end = self._file.tell()
         self._file.seek(0)
-        
+
         self.gc = GC(self._file, self.file_end, self._stop)
 
         # The packer needs to acquire the parent's commit lock
@@ -648,7 +648,7 @@
         # tindex: oid -> pos, for current txn
         # tvindex: version -> pos of XXX, for current txn
         # oid2serial: not used by the packer
-        
+
         self.index = fsIndex()
         self.vindex = {}
         self.tindex = {}
@@ -672,7 +672,7 @@
         # Txn and data records contain pointers to previous records.
         # Because these pointers are stored as file offsets, they
         # must be updated when we copy data.
-        
+
         # XXX Need to add sanity checking to pack
 
         self.gc.findReachable()
@@ -730,7 +730,7 @@
                 self._tfile.seek(new_pos - 8)
                 self._tfile.write(p64(tlen))
 
-            
+
             tlen = self._read_num(pos)
             if tlen != th.tlen:
                 self.fail(pos, "redundant transaction length does not "
@@ -757,7 +757,7 @@
 
         Returns position of txn header in output file and position
         of next record in the input file.
-        
+
         If any data records are copied, also write txn header (th).
         """
         copy = 0
@@ -878,4 +878,3 @@
         if self._lock_counter % 20 == 0:
             self._commit_lock_acquire()
         return ipos
-


=== ZODB3/ZODB/fsrecover.py 1.12 => 1.12.6.1 ===
--- ZODB3/ZODB/fsrecover.py:1.12	Fri May 30 15:59:55 2003
+++ ZODB3/ZODB/fsrecover.py	Mon Sep 15 17:26:56 2003
@@ -227,7 +227,7 @@
     except getopt.error:
         die()
         print __doc__ % argv[0]
-        
+
     force = partial = verbose = 0
     pack = None
     for opt, v in opts:
@@ -321,7 +321,7 @@
                         l = "bp"
                     else:
                         l = len(r.data)
-                        
+
                     print "%7d %s %s" % (u64(r.oid), l, r.version)
                 s = ofs.restore(r.oid, r.serial, r.data, r.version,
                                 r.data_txn, txn)
@@ -372,4 +372,3 @@
 
 if __name__ == "__main__":
     main()
-


=== ZODB3/ZODB/transact.py 1.2 => 1.2.4.1 ===
--- ZODB3/ZODB/transact.py:1.2	Wed Jun 11 11:05:07 2003
+++ ZODB3/ZODB/transact.py	Mon Sep 15 17:26:56 2003
@@ -34,7 +34,7 @@
     """
 
     # XXX deal with ZEO disconnected errors?
-    
+
     def g(*args, **kwargs):
         n = retries
         while n:


=== ZODB3/ZODB/utils.py 1.17 => 1.17.4.1 ===
--- ZODB3/ZODB/utils.py:1.17	Tue Jun 10 11:46:31 2003
+++ ZODB3/ZODB/utils.py	Mon Sep 15 17:26:56 2003
@@ -96,4 +96,3 @@
         return repr(oid)
 
 serial_repr = oid_repr
-



