[Zope-Checkins] CVS: Zope2 - asynchat.py:1.17 auth_handler.py:1.3 chat_server.py:1.3 continuation.py:1.3 counter.py:1.7 default_handler.py:1.7 event_loop.py:1.3 fifo.py:1.3 filesys.py:1.9 ftp_server.py:1.17 http_bobo.py:1.4 http_date.py:1.8 http_server.py:1.24 logger.py:1.11 m_syslog.py:1.11 monitor.py:1.12 monitor_client.py:1.8 monitor_client_win32.py:1.7 producers.py:1.10 put_handler.py:1.3 redirecting_handler.py:1.3 resolver.py:1.9 rpc_client.py:1.3 rpc_server.py:1.3 script_handler.py:1.3 start_medusa.py:1.3 status_handler.py:1.7 unix_user_handler.py:1.3 virtual_handler.py:1.3 xmlrpc_handler.py:1.3

andreas@serenade.digicool.com
Tue, 1 May 2001 07:44:50 -0400


Update of /cvs-repository/Zope2/ZServer/medusa
In directory serenade.digicool.com:/tmp/cvs-serv12311

Modified Files:
	asynchat.py auth_handler.py chat_server.py continuation.py 
	counter.py default_handler.py event_loop.py fifo.py filesys.py 
	ftp_server.py http_bobo.py http_date.py http_server.py 
	logger.py m_syslog.py monitor.py monitor_client.py 
	monitor_client_win32.py producers.py put_handler.py 
	redirecting_handler.py resolver.py rpc_client.py rpc_server.py 
	script_handler.py start_medusa.py status_handler.py 
	unix_user_handler.py virtual_handler.py xmlrpc_handler.py 
Log Message:
we *hate* tabs - let's get rid of them



--- Updated File asynchat.py in package Zope2 --
--- asynchat.py	2001/04/25 19:07:29	1.16
+++ asynchat.py	2001/05/01 11:44:48	1.17
@@ -51,242 +51,242 @@
 import string
 
 class async_chat (asyncore.dispatcher):
-	"""This is an abstract class.  You must derive from this class, and add
-	the two methods collect_incoming_data() and found_terminator()"""
-
-	# these are overridable defaults
-
-	ac_in_buffer_size	= 4096
-	ac_out_buffer_size	= 4096
-
-	def __init__ (self, conn=None):
-		self.ac_in_buffer = ''
-		self.ac_out_buffer = ''
-		self.producer_fifo = fifo()
-		asyncore.dispatcher.__init__ (self, conn)
-
-	def set_terminator (self, term):
-		"Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
-		self.terminator = term
-
-	def get_terminator (self):
-		return self.terminator
-
-	# grab some more data from the socket,
-	# throw it to the collector method,
-	# check for the terminator,
-	# if found, transition to the next state.
-
-	def handle_read (self):
-
-		try:
-			data = self.recv (self.ac_in_buffer_size)
-		except socket.error, why:
-			self.handle_error()
-			return
-
-		self.ac_in_buffer = self.ac_in_buffer + data
-
-		# Continue to search for self.terminator in self.ac_in_buffer,
-		# while calling self.collect_incoming_data.  The while loop
-		# is necessary because we might read several data+terminator
-		# combos with a single recv(1024).
-
-		while self.ac_in_buffer:
-			lb = len(self.ac_in_buffer)
-			terminator = self.get_terminator()
-			if terminator is None:
-				# no terminator, collect it all
-				self.collect_incoming_data (self.ac_in_buffer)
-				self.ac_in_buffer = ''
-			elif type(terminator) == type(0):
-				# numeric terminator
-				n = terminator
-				if lb < n:
-					self.collect_incoming_data (self.ac_in_buffer)
-					self.ac_in_buffer = ''
-					self.terminator = self.terminator - lb
-				else:
-					self.collect_incoming_data (self.ac_in_buffer[:n])
-					self.ac_in_buffer = self.ac_in_buffer[n:]
-					self.terminator = 0
-					self.found_terminator()
-			else:
-				# 3 cases:
-				# 1) end of buffer matches terminator exactly:
-				#    collect data, transition
-				# 2) end of buffer matches some prefix:
-				#    collect data to the prefix
-				# 3) end of buffer does not match any prefix:
-				#    collect data
-				terminator_len = len(terminator)
-				index = string.find (self.ac_in_buffer, terminator)
-				if index != -1:
-					# we found the terminator
-					if index > 0:
-						# don't bother reporting the empty string (source of subtle bugs)
-						self.collect_incoming_data (self.ac_in_buffer[:index])
-					self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
-					# This does the Right Thing if the terminator is changed here.
-					self.found_terminator()
-				else:
-					# check for a prefix of the terminator
-					index = find_prefix_at_end (self.ac_in_buffer, terminator)
-					if index:
-						if index != lb:
-							# we found a prefix, collect up to the prefix
-							self.collect_incoming_data (self.ac_in_buffer[:-index])
-							self.ac_in_buffer = self.ac_in_buffer[-index:]
-						break
-					else:
-						# no prefix, collect it all
-						self.collect_incoming_data (self.ac_in_buffer)
-						self.ac_in_buffer = ''
-
-	def handle_write (self):
-		self.initiate_send ()
-		
-	def handle_close (self):
-		self.close()
-
-	def push (self, data):
-		self.producer_fifo.push (simple_producer (data))
-		self.initiate_send()
-
-	def push_with_producer (self, producer):
-		self.producer_fifo.push (producer)
-		self.initiate_send()
-
-	def readable (self):
-		"predicate for inclusion in the readable for select()"
-		return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
-
-	def writable (self):
-		"predicate for inclusion in the writable for select()"
-		# return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
-		# this is about twice as fast, though not as clear.
-		return not (
-			(self.ac_out_buffer is '') and
-			self.producer_fifo.is_empty() and
-			self.connected
-			)
-
-	def close_when_done (self):
-		"automatically close this channel once the outgoing queue is empty"
-		self.producer_fifo.push (None)
-
-	# refill the outgoing buffer by calling the more() method
-	# of the first producer in the queue
-	def refill_buffer (self):
-		_string_type = type('')
-		while 1:
-			if len(self.producer_fifo):
-				p = self.producer_fifo.first()
-				# a 'None' in the producer fifo is a sentinel,
-				# telling us to close the channel.
-				if p is None:
-					if not self.ac_out_buffer:
-						self.producer_fifo.pop()
-						self.close()
-					return
-				elif type(p) is _string_type:
-					self.producer_fifo.pop()
-					self.ac_out_buffer = self.ac_out_buffer + p
-					return
-				data = p.more()
-				if data:
-					self.ac_out_buffer = self.ac_out_buffer + data
-					return
-				else:
-					self.producer_fifo.pop()
-			else:
-				return
-
-	def initiate_send (self):
-		obs = self.ac_out_buffer_size
-		# try to refill the buffer
-		if (len (self.ac_out_buffer) < obs):
-			self.refill_buffer()
-
-		if self.ac_out_buffer and self.connected:
-			# try to send the buffer
-			try:
-				num_sent = self.send (self.ac_out_buffer[:obs])
-				if num_sent:
-					self.ac_out_buffer = self.ac_out_buffer[num_sent:]
-
-			except socket.error, why:
-				self.handle_error()
-				return
-
-	def discard_buffers (self):
-		# Emergencies only!
-		self.ac_in_buffer = ''
-		self.ac_out_buffer = ''
-		while self.producer_fifo:
-			self.producer_fifo.pop()
-
-
+    """This is an abstract class.  You must derive from this class, and add
+    the two methods collect_incoming_data() and found_terminator()"""
+    
+    # these are overridable defaults
+    
+    ac_in_buffer_size	= 4096
+    ac_out_buffer_size	= 4096
+    
+    def __init__ (self, conn=None):
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        self.producer_fifo = fifo()
+        asyncore.dispatcher.__init__ (self, conn)
+        
+    def set_terminator (self, term):
+        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
+        self.terminator = term
+        
+    def get_terminator (self):
+        return self.terminator
+        
+        # grab some more data from the socket,
+        # throw it to the collector method,
+        # check for the terminator,
+        # if found, transition to the next state.
+        
+    def handle_read (self):
+    
+        try:
+            data = self.recv (self.ac_in_buffer_size)
+        except socket.error, why:
+            self.handle_error()
+            return
+            
+        self.ac_in_buffer = self.ac_in_buffer + data
+        
+        # Continue to search for self.terminator in self.ac_in_buffer,
+        # while calling self.collect_incoming_data.  The while loop
+        # is necessary because we might read several data+terminator
+        # combos with a single recv(1024).
+        
+        while self.ac_in_buffer:
+            lb = len(self.ac_in_buffer)
+            terminator = self.get_terminator()
+            if terminator is None:
+                    # no terminator, collect it all
+                self.collect_incoming_data (self.ac_in_buffer)
+                self.ac_in_buffer = ''
+            elif type(terminator) == type(0):
+                    # numeric terminator
+                n = terminator
+                if lb < n:
+                    self.collect_incoming_data (self.ac_in_buffer)
+                    self.ac_in_buffer = ''
+                    self.terminator = self.terminator - lb
+                else:
+                    self.collect_incoming_data (self.ac_in_buffer[:n])
+                    self.ac_in_buffer = self.ac_in_buffer[n:]
+                    self.terminator = 0
+                    self.found_terminator()
+            else:
+                    # 3 cases:
+                    # 1) end of buffer matches terminator exactly:
+                    #    collect data, transition
+                    # 2) end of buffer matches some prefix:
+                    #    collect data to the prefix
+                    # 3) end of buffer does not match any prefix:
+                    #    collect data
+                terminator_len = len(terminator)
+                index = string.find (self.ac_in_buffer, terminator)
+                if index != -1:
+                        # we found the terminator
+                    if index > 0:
+                            # don't bother reporting the empty string (source of subtle bugs)
+                        self.collect_incoming_data (self.ac_in_buffer[:index])
+                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+                    # This does the Right Thing if the terminator is changed here.
+                    self.found_terminator()
+                else:
+                        # check for a prefix of the terminator
+                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
+                    if index:
+                        if index != lb:
+                                # we found a prefix, collect up to the prefix
+                            self.collect_incoming_data (self.ac_in_buffer[:-index])
+                            self.ac_in_buffer = self.ac_in_buffer[-index:]
+                        break
+                    else:
+                            # no prefix, collect it all
+                        self.collect_incoming_data (self.ac_in_buffer)
+                        self.ac_in_buffer = ''
+                        
+    def handle_write (self):
+        self.initiate_send ()
+        
+    def handle_close (self):
+        self.close()
+        
+    def push (self, data):
+        self.producer_fifo.push (simple_producer (data))
+        self.initiate_send()
+        
+    def push_with_producer (self, producer):
+        self.producer_fifo.push (producer)
+        self.initiate_send()
+        
+    def readable (self):
+        "predicate for inclusion in the readable for select()"
+        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+        
+    def writable (self):
+        "predicate for inclusion in the writable for select()"
+        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+        # this is about twice as fast, though not as clear.
+        return not (
+                (self.ac_out_buffer is '') and
+                self.producer_fifo.is_empty() and
+                self.connected
+                )
+        
+    def close_when_done (self):
+        "automatically close this channel once the outgoing queue is empty"
+        self.producer_fifo.push (None)
+        
+        # refill the outgoing buffer by calling the more() method
+        # of the first producer in the queue
+    def refill_buffer (self):
+        _string_type = type('')
+        while 1:
+            if len(self.producer_fifo):
+                p = self.producer_fifo.first()
+                # a 'None' in the producer fifo is a sentinel,
+                # telling us to close the channel.
+                if p is None:
+                    if not self.ac_out_buffer:
+                        self.producer_fifo.pop()
+                        self.close()
+                    return
+                elif type(p) is _string_type:
+                    self.producer_fifo.pop()
+                    self.ac_out_buffer = self.ac_out_buffer + p
+                    return
+                data = p.more()
+                if data:
+                    self.ac_out_buffer = self.ac_out_buffer + data
+                    return
+                else:
+                    self.producer_fifo.pop()
+            else:
+                return
+                
+    def initiate_send (self):
+        obs = self.ac_out_buffer_size
+        # try to refill the buffer
+        if (len (self.ac_out_buffer) < obs):
+            self.refill_buffer()
+            
+        if self.ac_out_buffer and self.connected:
+                # try to send the buffer
+            try:
+                num_sent = self.send (self.ac_out_buffer[:obs])
+                if num_sent:
+                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+                    
+            except socket.error, why:
+                self.handle_error()
+                return
+                
+    def discard_buffers (self):
+            # Emergencies only!
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        while self.producer_fifo:
+            self.producer_fifo.pop()
+            
+            
 class simple_producer:
-
-	def __init__ (self, data, buffer_size=512):
-		self.data = data
-		self.buffer_size = buffer_size
 
-	def more (self):
-		if len (self.data) > self.buffer_size:
-			result = self.data[:self.buffer_size]
-			self.data = self.data[self.buffer_size:]
-			return result
-		else:
-			result = self.data
-			self.data = ''
-			return result
-
+    def __init__ (self, data, buffer_size=512):
+        self.data = data
+        self.buffer_size = buffer_size
+        
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+            
 class fifo:
-	def __init__ (self, list=None):
-		if not list:
-			self.list = []
-		else:
-			self.list = list
-		
-	def __len__ (self):
-		return len(self.list)
-
-	def is_empty (self):
-		return self.list == []
-
-	def first (self):
-		return self.list[0]
-
-	def push (self, data):
-		self.list.append (data)
-
-	def pop (self):
-		if self.list:
-			result = self.list[0]
-			del self.list[0]
-			return (1, result)
-		else:
-			return (0, None)
-
-# Given 'haystack', see if any prefix of 'needle' is at its end.  This
-# assumes an exact match has already been checked.  Return the number of
-# characters matched.
-# for example:
-# f_p_a_e ("qwerty\r", "\r\n") => 1
-# f_p_a_e ("qwertydkjf", "\r\n") => 0
-# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
-
-# this could maybe be made faster with a computed regex?
-# [answer: no; circa Python-2.0, Jan 2001]
-# new python:   28961/s
-# old python:   18307/s
-# re:           12820/s
-# regex:        14035/s
-
+    def __init__ (self, list=None):
+        if not list:
+            self.list = []
+        else:
+            self.list = list
+            
+    def __len__ (self):
+        return len(self.list)
+        
+    def is_empty (self):
+        return self.list == []
+        
+    def first (self):
+        return self.list[0]
+        
+    def push (self, data):
+        self.list.append (data)
+        
+    def pop (self):
+        if self.list:
+            result = self.list[0]
+            del self.list[0]
+            return (1, result)
+        else:
+            return (0, None)
+            
+            # Given 'haystack', see if any prefix of 'needle' is at its end.  This
+            # assumes an exact match has already been checked.  Return the number of
+            # characters matched.
+            # for example:
+            # f_p_a_e ("qwerty\r", "\r\n") => 1
+            # f_p_a_e ("qwertydkjf", "\r\n") => 0
+            # f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
+            
+            # this could maybe be made faster with a computed regex?
+            # [answer: no; circa Python-2.0, Jan 2001]
+            # new python:   28961/s
+            # old python:   18307/s
+            # re:           12820/s
+            # regex:        14035/s
+            
 def find_prefix_at_end (haystack, needle):
-	l = len(needle) - 1
-	while l and not haystack.endswith(needle[:l]):
-		l -= 1
-	return l
+    l = len(needle) - 1
+    while l and not haystack.endswith(needle[:l]):
+        l -= 1
+    return l

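Since async_chat is abstract, a subclass must supply collect_incoming_data() and found_terminator(). As a minimal sketch (not part of this checkin; the class name is made up), a line-oriented echo channel looks like this:

import asynchat

class echo_channel (asynchat.async_chat):
    "Echo each CRLF-terminated line back to the peer."

    def __init__ (self, conn=None):
        asynchat.async_chat.__init__ (self, conn)
        self.buffer = ''
        self.set_terminator ('\r\n')

    def collect_incoming_data (self, data):
        # accumulate bytes until the terminator shows up
        self.buffer = self.buffer + data

    def found_terminator (self):
        # a complete line has arrived; send it back and reset
        self.push (self.buffer + '\r\n')
        self.buffer = ''
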
--- Updated File auth_handler.py in package Zope2 --
--- auth_handler.py	2001/04/25 19:07:30	1.2
+++ auth_handler.py	2001/05/01 11:44:48	1.3
@@ -30,108 +30,108 @@
 # does anyone support digest authentication? (rfc2069)
 
 class auth_handler:
-	def __init__ (self, dict, handler, realm='default'):
-		self.authorizer = dictionary_authorizer (dict)
-		self.handler = handler
-		self.realm = realm
-		self.pass_count = counter.counter()
-		self.fail_count = counter.counter()
-
-	def match (self, request):
-		# by default, use the given handler's matcher
-		return self.handler.match (request)
-				
-	def handle_request (self, request):
-		# authorize a request before handling it...
-		scheme = get_header (AUTHORIZATION, request.header)
-
-		if scheme:
-			scheme = string.lower (scheme)
-			if scheme == 'basic':
-				cookie = AUTHORIZATION.group(2)
-				try:
-					decoded = base64.decodestring (cookie)
-				except:
-					print 'malformed authorization info <%s>' % cookie
-					request.error (400)
-					return
-				auth_info = string.split (decoded, ':')
-				if self.authorizer.authorize (auth_info):
-					self.pass_count.increment()
-					request.auth_info = auth_info
-					self.handler.handle_request (request)
-				else:
-					self.handle_unauthorized (request)
-			#elif scheme == 'digest':
-			#	print 'digest: ',AUTHORIZATION.group(2)
-			else:
-				print 'unknown/unsupported auth method: %s' % scheme
-				self.handle_unauthorized()
-		else:
-			# list both?  prefer one or the other?
-			# you could also use a 'nonce' here. [see below]
-			#auth = 'Basic realm="%s" Digest realm="%s"' % (self.realm, self.realm)
-			#nonce = self.make_nonce (request)
-			#auth = 'Digest realm="%s" nonce="%s"' % (self.realm, nonce)
-			#request['WWW-Authenticate'] = auth
-			#print 'sending header: %s' % request['WWW-Authenticate']
-			self.handle_unauthorized (request)
-		
-	def handle_unauthorized (self, request):
-		# We are now going to receive data that we want to ignore,
-		# so clear the terminator to discard the file data we're not interested in.
-		self.fail_count.increment()
-		request.channel.set_terminator (None)
-		request['Connection'] = 'close'
-		request['WWW-Authenticate'] = 'Basic realm="%s"' % self.realm
-		request.error (401)
-
-	def make_nonce (self, request):
-		"A digest-authentication <nonce>, constructed as suggested in RFC 2069"
-		ip = request.channel.server.ip
-		now = str (long (time.time()))[:-1]
-		private_key = str (id (self))
-		nonce = string.join ([ip, now, private_key], ':')
-		return self.apply_hash (nonce)
-
-	def apply_hash (self, s):
-		"Apply MD5 to a string <s>, then wrap it in base64 encoding."
-		m = md5.new()
-		m.update (s)
-		d = m.digest()
-		# base64.encodestring tacks on an extra linefeed.
-		return base64.encodestring (d)[:-1]
-
-	def status (self):
-		# Thanks to mwm@contessa.phone.net (Mike Meyer)
-		r = [
-			producers.simple_producer (
-				'<li>Authorization Extension : '
-				'<b>Unauthorized requests:</b> %s<ul>' % self.fail_count
-				)
-			]
-		if hasattr (self.handler, 'status'):
-			r.append (self.handler.status())
-		r.append (
-			producers.simple_producer ('</ul>')
-			)
-		return producers.composite_producer (
-			http_server.fifo (r)
-			)
-
+    def __init__ (self, dict, handler, realm='default'):
+        self.authorizer = dictionary_authorizer (dict)
+        self.handler = handler
+        self.realm = realm
+        self.pass_count = counter.counter()
+        self.fail_count = counter.counter()
+        
+    def match (self, request):
+            # by default, use the given handler's matcher
+        return self.handler.match (request)
+        
+    def handle_request (self, request):
+            # authorize a request before handling it...
+        scheme = get_header (AUTHORIZATION, request.header)
+        
+        if scheme:
+            scheme = string.lower (scheme)
+            if scheme == 'basic':
+                cookie = AUTHORIZATION.group(2)
+                try:
+                    decoded = base64.decodestring (cookie)
+                except:
+                    print 'malformed authorization info <%s>' % cookie
+                    request.error (400)
+                    return
+                auth_info = string.split (decoded, ':')
+                if self.authorizer.authorize (auth_info):
+                    self.pass_count.increment()
+                    request.auth_info = auth_info
+                    self.handler.handle_request (request)
+                else:
+                    self.handle_unauthorized (request)
+                    #elif scheme == 'digest':
+                    #	print 'digest: ',AUTHORIZATION.group(2)
+            else:
+                print 'unknown/unsupported auth method: %s' % scheme
+                self.handle_unauthorized()
+        else:
+                # list both?  prefer one or the other?
+                # you could also use a 'nonce' here. [see below]
+                #auth = 'Basic realm="%s" Digest realm="%s"' % (self.realm, self.realm)
+                #nonce = self.make_nonce (request)
+                #auth = 'Digest realm="%s" nonce="%s"' % (self.realm, nonce)
+                #request['WWW-Authenticate'] = auth
+                #print 'sending header: %s' % request['WWW-Authenticate']
+            self.handle_unauthorized (request)
+            
+    def handle_unauthorized (self, request):
+            # We are now going to receive data that we want to ignore,
+            # so clear the terminator to discard the file data we're not interested in.
+        self.fail_count.increment()
+        request.channel.set_terminator (None)
+        request['Connection'] = 'close'
+        request['WWW-Authenticate'] = 'Basic realm="%s"' % self.realm
+        request.error (401)
+        
+    def make_nonce (self, request):
+        "A digest-authentication <nonce>, constructed as suggested in RFC 2069"
+        ip = request.channel.server.ip
+        now = str (long (time.time()))[:-1]
+        private_key = str (id (self))
+        nonce = string.join ([ip, now, private_key], ':')
+        return self.apply_hash (nonce)
+        
+    def apply_hash (self, s):
+        "Apply MD5 to a string <s>, then wrap it in base64 encoding."
+        m = md5.new()
+        m.update (s)
+        d = m.digest()
+        # base64.encodestring tacks on an extra linefeed.
+        return base64.encodestring (d)[:-1]
+        
+    def status (self):
+            # Thanks to mwm@contessa.phone.net (Mike Meyer)
+        r = [
+                producers.simple_producer (
+                        '<li>Authorization Extension : '
+                        '<b>Unauthorized requests:</b> %s<ul>' % self.fail_count
+                        )
+                ]
+        if hasattr (self.handler, 'status'):
+            r.append (self.handler.status())
+        r.append (
+                producers.simple_producer ('</ul>')
+                )
+        return producers.composite_producer (
+                http_server.fifo (r)
+                )
+        
 class dictionary_authorizer:
-	def __init__ (self, dict):
-		self.dict = dict
-
-	def authorize (self, auth_info):
-		[username, password] = auth_info
-		if (self.dict.has_key (username)) and (self.dict[username] == password):
-			return 1
-		else:
-			return 0
-
+    def __init__ (self, dict):
+        self.dict = dict
+        
+    def authorize (self, auth_info):
+        [username, password] = auth_info
+        if (self.dict.has_key (username)) and (self.dict[username] == password):
+            return 1
+        else:
+            return 0
+            
 AUTHORIZATION = re.compile (
-	#               scheme  challenge
-	'Authorization: ([^ ]+) (.*)',
-	re.IGNORECASE
-	)
+        #               scheme  challenge
+        'Authorization: ([^ ]+) (.*)',
+        re.IGNORECASE
+        )

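The handler expects the standard Basic scheme: the client base64-encodes 'user:password', and handle_request() reverses that before consulting the authorizer. A rough sketch of both directions (the helper names are hypothetical, not part of this checkin):

import base64, string

def make_basic_cookie (username, password):
    # what a client sends after 'Authorization: Basic '
    # (encodestring tacks on a linefeed, so strip it)
    return string.strip (base64.encodestring ('%s:%s' % (username, password)))

def split_basic_cookie (cookie):
    # the inverse, as performed by handle_request()
    return string.split (base64.decodestring (cookie), ':')

auth = dictionary_authorizer ({'medusa': 'sekrit'})
cookie = make_basic_cookie ('medusa', 'sekrit')
print auth.authorize (split_basic_cookie (cookie))    # -> 1
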
--- Updated File chat_server.py in package Zope2 --
--- chat_server.py	2001/04/25 19:07:30	1.2
+++ chat_server.py	2001/05/01 11:44:48	1.3
@@ -18,133 +18,133 @@
 
 class chat_channel (asynchat.async_chat):
 
-	def __init__ (self, server, sock, addr):
-		asynchat.async_chat.__init__ (self, sock)
-		self.server = server
-		self.addr = addr
-		self.set_terminator ('\r\n')
-		self.data = ''
-		self.nick = None
-		self.push ('nickname?: ')
-
-	def collect_incoming_data (self, data):
-		self.data = self.data + data
-
-	def found_terminator (self):
-		line = self.data
-		self.data = ''
-		if self.nick is None:
-			self.nick = string.split (line)[0]
-			if not self.nick:
-				self.nick = None
-				self.push ('huh? gimmee a nickname: ')
-			else:
-				self.greet()
-		else:
-			if not line:
-				pass
-			elif line[0] != '/':
-				self.server.push_line (self, line)
-			else:
-				self.handle_command (line)
-
-	def greet (self):
-		self.push ('Hello, %s\r\n' % self.nick)
-		num_channels = len(self.server.channels)-1
-		if num_channels == 0:
-			self.push ('[Kinda lonely in here... you\'re the only caller!]\r\n')
-		else:
-			self.push ('[There are %d other callers]\r\n' % (len(self.server.channels)-1))
-			nicks = map (lambda x: x.get_nick(), self.server.channels.keys())
-			self.push (string.join (nicks, '\r\n  ') + '\r\n')
-			self.server.push_line (self, '[joined]')
-
-	def handle_command (self, command):
-		import types
-		command_line = string.split(command)
-		name = 'cmd_%s' % command_line[0][1:]
-		if hasattr (self, name):
-			# make sure it's a method...
-			method = getattr (self, name)
-			if type(method) == type(self.handle_command):
-				method (command_line[1:])
-			else:
-				self.push ('unknown command: %s' % command_line[0])
-
-	def cmd_quit (self, args):
-		self.server.push_line (self, '[left]')
-		self.push ('Goodbye!\r\n')
-		self.close_when_done()
-
-	# alias for '/quit' - '/q'
-	cmd_q = cmd_quit
-
-	def push_line (self, nick, line):
-		self.push ('%s: %s\r\n' % (nick, line))
-
-	def handle_close (self):
-		self.close()
-
-	def close (self):
-		del self.server.channels[self]
-		asynchat.async_chat.close (self)
-
-	def get_nick (self):
-		if self.nick is not None:
-			return self.nick
-		else:
-			return 'Unknown'
-
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        self.nick = None
+        self.push ('nickname?: ')
+        
+    def collect_incoming_data (self, data):
+        self.data = self.data + data
+        
+    def found_terminator (self):
+        line = self.data
+        self.data = ''
+        if self.nick is None:
+            self.nick = string.split (line)[0]
+            if not self.nick:
+                self.nick = None
+                self.push ('huh? gimmee a nickname: ')
+            else:
+                self.greet()
+        else:
+            if not line:
+                pass
+            elif line[0] != '/':
+                self.server.push_line (self, line)
+            else:
+                self.handle_command (line)
+                
+    def greet (self):
+        self.push ('Hello, %s\r\n' % self.nick)
+        num_channels = len(self.server.channels)-1
+        if num_channels == 0:
+            self.push ('[Kinda lonely in here... you\'re the only caller!]\r\n')
+        else:
+            self.push ('[There are %d other callers]\r\n' % (len(self.server.channels)-1))
+            nicks = map (lambda x: x.get_nick(), self.server.channels.keys())
+            self.push (string.join (nicks, '\r\n  ') + '\r\n')
+            self.server.push_line (self, '[joined]')
+            
+    def handle_command (self, command):
+        import types
+        command_line = string.split(command)
+        name = 'cmd_%s' % command_line[0][1:]
+        if hasattr (self, name):
+                # make sure it's a method...
+            method = getattr (self, name)
+            if type(method) == type(self.handle_command):
+                method (command_line[1:])
+            else:
+                self.push ('unknown command: %s' % command_line[0])
+                
+    def cmd_quit (self, args):
+        self.server.push_line (self, '[left]')
+        self.push ('Goodbye!\r\n')
+        self.close_when_done()
+        
+        # alias for '/quit' - '/q'
+    cmd_q = cmd_quit
+    
+    def push_line (self, nick, line):
+        self.push ('%s: %s\r\n' % (nick, line))
+        
+    def handle_close (self):
+        self.close()
+        
+    def close (self):
+        del self.server.channels[self]
+        asynchat.async_chat.close (self)
+        
+    def get_nick (self):
+        if self.nick is not None:
+            return self.nick
+        else:
+            return 'Unknown'
+            
 class chat_server (asyncore.dispatcher):
 
-	SERVER_IDENT = 'Chat Server (V%s)' % VERSION
-
-	channel_class = chat_channel
-
-	spy = 1
-
-	def __init__ (self, ip='', port=8518):
-		self.port = port
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-		self.bind ((ip, port))
-		print '%s started on port %d' % (self.SERVER_IDENT, port)
-		self.listen (5)
-		self.channels = {}
-		self.count = 0
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		self.count = self.count + 1
-		print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])
-		self.channels[self.channel_class (self, conn, addr)] = 1
-
-	def push_line (self, from_channel, line):
-		nick = from_channel.get_nick()
-		if self.spy:
-			print '%s: %s' % (nick, line)
-		for c in self.channels.keys():
-			if c is not from_channel:
-				c.push ('%s: %s\r\n' % (nick, line))
-
-	def status (self):
-		lines = [
-			'<h2>%s</h2>'						% self.SERVER_IDENT,
-			'<br>Listening on Port: %d'			% self.port,
-			'<br><b>Total Sessions:</b> %d'		% self.count,
-			'<br><b>Current Sessions:</b> %d'	% (len(self.channels))
-			]
-		return status_handler.lines_producer (lines)
-
-	def writable (self):
-		return 0
-
+    SERVER_IDENT = 'Chat Server (V%s)' % VERSION
+    
+    channel_class = chat_channel
+    
+    spy = 1
+    
+    def __init__ (self, ip='', port=8518):
+        self.port = port
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.bind ((ip, port))
+        print '%s started on port %d' % (self.SERVER_IDENT, port)
+        self.listen (5)
+        self.channels = {}
+        self.count = 0
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        self.count = self.count + 1
+        print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])
+        self.channels[self.channel_class (self, conn, addr)] = 1
+        
+    def push_line (self, from_channel, line):
+        nick = from_channel.get_nick()
+        if self.spy:
+            print '%s: %s' % (nick, line)
+        for c in self.channels.keys():
+            if c is not from_channel:
+                c.push ('%s: %s\r\n' % (nick, line))
+                
+    def status (self):
+        lines = [
+                '<h2>%s</h2>'						% self.SERVER_IDENT,
+                '<br>Listening on Port: %d'			% self.port,
+                '<br><b>Total Sessions:</b> %d'		% self.count,
+                '<br><b>Current Sessions:</b> %d'	% (len(self.channels))
+                ]
+        return status_handler.lines_producer (lines)
+        
+    def writable (self):
+        return 0
+        
 if __name__ == '__main__':
-	import sys
-
-	if len(sys.argv) > 1:
-		port = string.atoi (sys.argv[1])
-	else:
-		port = 8518
-
-	s = chat_server ('', port)
-	asyncore.loop()
+    import sys
+    
+    if len(sys.argv) > 1:
+        port = string.atoi (sys.argv[1])
+    else:
+        port = 8518
+        
+    s = chat_server ('', port)
+    asyncore.loop()

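Because handle_command() dispatches '/name' to a cmd_name method, adding a command needs no parser changes. A hedged sketch (hypothetical subclass, not part of this checkin) adding a '/who' listing:

import string

class chat_channel_with_who (chat_channel):

    def cmd_who (self, args):
        # list the nickname of every connected caller
        nicks = map (lambda c: c.get_nick(), self.server.channels.keys())
        self.push (string.join (nicks, '\r\n') + '\r\n')

A server picks this up by setting channel_class = chat_channel_with_who, since chat_server instantiates whatever channel_class names.
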
--- Updated File continuation.py in package Zope2 --
--- continuation.py	2001/04/25 19:07:30	1.2
+++ continuation.py	2001/05/01 11:44:48	1.3
@@ -4,24 +4,24 @@
 
 class continuation:
 
-	'Package up a continuation as an object.'
-	'Also a convenient place to store state.'
-
-	def __init__ (self, fun, *args):
-		self.funs = [(fun, args)]
-
-	def __call__ (self, *args):
-		fun, init_args = self.funs[0]
-		self.funs = self.funs[1:]
-		if self.funs:
-			apply (fun, (self,)+ init_args + args)
-		else:
-			apply (fun, init_args + args)
-
-	def chain (self, fun, *args):
-		self.funs.insert (0, (fun, args))
-		return self
-
-	def abort (self, *args):
-		fun, init_args = self.funs[-1]
-		apply (fun, init_args + args)
+    'Package up a continuation as an object.'
+    'Also a convenient place to store state.'
+    
+    def __init__ (self, fun, *args):
+        self.funs = [(fun, args)]
+        
+    def __call__ (self, *args):
+        fun, init_args = self.funs[0]
+        self.funs = self.funs[1:]
+        if self.funs:
+            apply (fun, (self,)+ init_args + args)
+        else:
+            apply (fun, init_args + args)
+            
+    def chain (self, fun, *args):
+        self.funs.insert (0, (fun, args))
+        return self
+        
+    def abort (self, *args):
+        fun, init_args = self.funs[-1]
+        apply (fun, init_args + args)

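Note the calling convention: every link but the last receives the continuation itself as its first argument and is responsible for invoking it with the intermediate result. A small sketch (the step functions are made up for illustration):

def finish (x):
    print 'final value:', x

def double (k, x):
    # an intermediate link: transform the value, then run the rest
    k (x * 2)

k = continuation (finish)
k.chain (double)
k (21)    # double() gets (k, 21), calls k(42), and finish() prints 42
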
--- Updated File counter.py in package Zope2 --
--- counter.py	2001/04/25 19:07:30	1.6
+++ counter.py	2001/05/01 11:44:48	1.7
@@ -13,35 +13,35 @@
 # will overflow.
 
 class counter:
-	"general-purpose counter"
-
-	def __init__ (self, initial_value=0):
-		self.value = initial_value
-	
-	def increment (self, delta=1):
-		result = self.value
-		try:
-			self.value = self.value + delta
-		except OverflowError:
-			self.value = long(self.value) + delta
-		return result
-
-	def decrement (self, delta=1):
-		result = self.value
-		try:
-			self.value = self.value - delta
-		except OverflowError:
-			self.value = long(self.value) - delta
-		return result
-
-	def as_long (self):
-		return long(self.value)
-
-	def __nonzero__ (self):
-		return self.value != 0
-
-	def __repr__ (self):
-		return '<counter value=%s at %x>' % (self.value, id(self))
-
-	def __str__ (self):
-		return str(long(self.value))[:-1]
+    "general-purpose counter"
+    
+    def __init__ (self, initial_value=0):
+        self.value = initial_value
+        
+    def increment (self, delta=1):
+        result = self.value
+        try:
+            self.value = self.value + delta
+        except OverflowError:
+            self.value = long(self.value) + delta
+        return result
+        
+    def decrement (self, delta=1):
+        result = self.value
+        try:
+            self.value = self.value - delta
+        except OverflowError:
+            self.value = long(self.value) - delta
+        return result
+        
+    def as_long (self):
+        return long(self.value)
+        
+    def __nonzero__ (self):
+        return self.value != 0
+        
+    def __repr__ (self):
+        return '<counter value=%s at %x>' % (self.value, id(self))
+        
+    def __str__ (self):
+        return str(long(self.value))[:-1]

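Two behaviors worth noting: increment() and decrement() return the value from *before* the change, and once plain-int arithmetic overflows, the value is silently promoted to a long; __str__ then strips the trailing 'L'. A quick sketch (assuming a Python 2.x where int overflow raises OverflowError):

import sys

c = counter()
print c.increment()      # -> 0 (the old value; c now holds 1)
print c.increment (5)    # -> 1 (c now holds 6)

big = counter (sys.maxint)
big.increment()          # overflows the int; value rolls into a long
print big                # str() drops the trailing 'L'
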
--- Updated File default_handler.py in package Zope2 --
--- default_handler.py	2001/04/25 19:07:30	1.6
+++ default_handler.py	2001/05/01 11:44:48	1.7
@@ -41,177 +41,177 @@
 
 class default_handler:
 
-	valid_commands = ['get', 'head']
-
-	IDENT = 'Default HTTP Request Handler'
-
-	# Pathnames that are tried when a URI resolves to a directory name
-	directory_defaults = [
-		'index.html',
-		'default.html'
-		]
-
-	default_file_producer = producers.file_producer
-
-	def __init__ (self, filesystem):
-		self.filesystem = filesystem
-		# count total hits
-		self.hit_counter = counter()
-		# count file deliveries
-		self.file_counter = counter()
-		# count cache hits
-		self.cache_counter = counter()
-
-	hit_counter = 0
-
-	def __repr__ (self):
-		return '<%s (%s hits) at %x>' % (
-			self.IDENT,
-			self.hit_counter,
-			id (self)
-			)
-
-	# always match, since this is a default
-	def match (self, request):
-		return 1
-
-	# handle a file request, with caching.
-
-	def handle_request (self, request):
-
-		if request.command not in self.valid_commands:
-			request.error (400) # bad request
-			return
-
-		self.hit_counter.increment()
-
-		path, params, query, fragment = request.split_uri()
-
-		if '%' in path:
-			path = unquote (path)
-
-		# strip off all leading slashes
-		while path and path[0] == '/':
-			path = path[1:]
-
-		if self.filesystem.isdir (path):
-			if path and path[-1] != '/':
-				request['Location'] = 'http://%s/%s/' % (
-					request.channel.server.server_name,
-					path
-					)
-				request.error (301)
-				return
-
-			# we could also generate a directory listing here,
-			# may want to move this into another method for that
-			# purpose
-			found = 0
-			if path and path[-1] != '/':
-				path = path + '/'
-			for default in self.directory_defaults:
-				p = path + default
-				if self.filesystem.isfile (p):
-					path = p
-					found = 1
-					break
-			if not found:
-				request.error (404) # Not Found 
-				return
-
-		elif not self.filesystem.isfile (path):
-			request.error (404) # Not Found
-			return
-
-		file_length = self.filesystem.stat (path)[stat.ST_SIZE]
-
-		ims = get_header_match (IF_MODIFIED_SINCE, request.header)
-
-		length_match = 1
-		if ims:
-			length = ims.group (4)
-			if length:
-				try:
-					length = string.atoi (length)
-					if length != file_length:
-						length_match = 0
-				except:
-					pass
-
-		ims_date = 0
-
-		if ims:
-			ims_date = http_date.parse_http_date (ims.group (1))
-
-		try:
-			mtime = self.filesystem.stat (path)[stat.ST_MTIME]
-		except:
-			request.error (404)
-			return
-
-		if length_match and ims_date:
-			if mtime <= ims_date:
-				request.reply_code = 304
-				request.done()
-				self.cache_counter.increment()
-				return
-		try:
-			file = self.filesystem.open (path, 'rb')
-		except IOError:
-			request.error (404)
-			return
-
-		request['Last-Modified'] = http_date.build_http_date (mtime)
-		request['Content-Length'] = file_length
-		self.set_content_type (path, request)
-
-		if request.command == 'get':
-			request.push (self.default_file_producer (file))
-
-		self.file_counter.increment()
-		request.done()
-
-	def set_content_type (self, path, request):
-		ext = string.lower (get_extension (path))
-		if mime_type_table.content_type_map.has_key (ext):
-			request['Content-Type'] = mime_type_table.content_type_map[ext]
-		else:
-			# TODO: test a chunk off the front of the file for 8-bit
-			# characters, and use application/octet-stream instead.
-			request['Content-Type'] = 'text/plain'
-
-	def status (self):
-		return producers.simple_producer (
-			'<li>%s' % status_handler.html_repr (self)
-			+ '<ul>'
-			+ '  <li><b>Total Hits:</b> %s'			% self.hit_counter
-			+ '  <li><b>Files Delivered:</b> %s'	% self.file_counter
-			+ '  <li><b>Cache Hits:</b> %s'			% self.cache_counter
-			+ '</ul>'
-			)
-
-# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
-# to this header.  I suppose its purpose is to avoid the overhead
-# of parsing dates...
+    valid_commands = ['get', 'head']
+    
+    IDENT = 'Default HTTP Request Handler'
+    
+    # Pathnames that are tried when a URI resolves to a directory name
+    directory_defaults = [
+            'index.html',
+            'default.html'
+            ]
+    
+    default_file_producer = producers.file_producer
+    
+    def __init__ (self, filesystem):
+        self.filesystem = filesystem
+        # count total hits
+        self.hit_counter = counter()
+        # count file deliveries
+        self.file_counter = counter()
+        # count cache hits
+        self.cache_counter = counter()
+        
+    hit_counter = 0
+    
+    def __repr__ (self):
+        return '<%s (%s hits) at %x>' % (
+                self.IDENT,
+                self.hit_counter,
+                id (self)
+                )
+        
+        # always match, since this is a default
+    def match (self, request):
+        return 1
+        
+        # handle a file request, with caching.
+        
+    def handle_request (self, request):
+    
+        if request.command not in self.valid_commands:
+            request.error (400) # bad request
+            return
+            
+        self.hit_counter.increment()
+        
+        path, params, query, fragment = request.split_uri()
+        
+        if '%' in path:
+            path = unquote (path)
+            
+            # strip off all leading slashes
+        while path and path[0] == '/':
+            path = path[1:]
+            
+        if self.filesystem.isdir (path):
+            if path and path[-1] != '/':
+                request['Location'] = 'http://%s/%s/' % (
+                        request.channel.server.server_name,
+                        path
+                        )
+                request.error (301)
+                return
+                
+                # we could also generate a directory listing here,
+                # may want to move this into another method for that
+                # purpose
+            found = 0
+            if path and path[-1] != '/':
+                path = path + '/'
+            for default in self.directory_defaults:
+                p = path + default
+                if self.filesystem.isfile (p):
+                    path = p
+                    found = 1
+                    break
+            if not found:
+                request.error (404) # Not Found 
+                return
+                
+        elif not self.filesystem.isfile (path):
+            request.error (404) # Not Found
+            return
+            
+        file_length = self.filesystem.stat (path)[stat.ST_SIZE]
+        
+        ims = get_header_match (IF_MODIFIED_SINCE, request.header)
+        
+        length_match = 1
+        if ims:
+            length = ims.group (4)
+            if length:
+                try:
+                    length = string.atoi (length)
+                    if length != file_length:
+                        length_match = 0
+                except:
+                    pass
+                    
+        ims_date = 0
+        
+        if ims:
+            ims_date = http_date.parse_http_date (ims.group (1))
+            
+        try:
+            mtime = self.filesystem.stat (path)[stat.ST_MTIME]
+        except:
+            request.error (404)
+            return
+            
+        if length_match and ims_date:
+            if mtime <= ims_date:
+                request.reply_code = 304
+                request.done()
+                self.cache_counter.increment()
+                return
+        try:
+            file = self.filesystem.open (path, 'rb')
+        except IOError:
+            request.error (404)
+            return
+            
+        request['Last-Modified'] = http_date.build_http_date (mtime)
+        request['Content-Length'] = file_length
+        self.set_content_type (path, request)
+        
+        if request.command == 'get':
+            request.push (self.default_file_producer (file))
+            
+        self.file_counter.increment()
+        request.done()
+        
+    def set_content_type (self, path, request):
+        ext = string.lower (get_extension (path))
+        if mime_type_table.content_type_map.has_key (ext):
+            request['Content-Type'] = mime_type_table.content_type_map[ext]
+        else:
+                # TODO: test a chunk off the front of the file for 8-bit
+                # characters, and use application/octet-stream instead.
+            request['Content-Type'] = 'text/plain'
+            
+    def status (self):
+        return producers.simple_producer (
+                '<li>%s' % status_handler.html_repr (self)
+                + '<ul>'
+                + '  <li><b>Total Hits:</b> %s'			% self.hit_counter
+                + '  <li><b>Files Delivered:</b> %s'	% self.file_counter
+                + '  <li><b>Cache Hits:</b> %s'			% self.cache_counter
+                + '</ul>'
+                )
+        
+        # HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
+        # to this header.  I suppose its purpose is to avoid the overhead
+        # of parsing dates...
 IF_MODIFIED_SINCE = re.compile (
-	'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
-	re.IGNORECASE
-	)
+        'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
+        re.IGNORECASE
+        )
 
 USER_AGENT = re.compile ('User-Agent: (.*)', re.IGNORECASE)
 
 CONTENT_TYPE = re.compile (
-	r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
-	re.IGNORECASE
-	)
+        r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
+        re.IGNORECASE
+        )
 
 get_header = http_server.get_header
 get_header_match = http_server.get_header_match
 
 def get_extension (path):
-	dirsep = string.rfind (path, '/')
-	dotsep = string.rfind (path, '.')
-	if dotsep > dirsep:
-		return path[dotsep+1:]
-	else:
-		return ''
+    dirsep = string.rfind (path, '/')
+    dotsep = string.rfind (path, '.')
+    if dotsep > dirsep:
+        return path[dotsep+1:]
+    else:
+        return ''

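For reference, IF_MODIFIED_SINCE captures the date in group(1) and the optional length in group(4), and get_extension() only honors a dot that falls after the last slash. A few illustrative calls (the values are made up):

m = IF_MODIFIED_SINCE.match (
        'If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=1234')
print m.group (1)    # -> 'Sat, 29 Oct 1994 19:43:31 GMT'
print m.group (4)    # -> '1234'

print get_extension ('/docs/manual.html')    # -> 'html'
print get_extension ('/docs.d/README')       # -> '' (dot is in the directory part)
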
--- Updated File event_loop.py in package Zope2 --
--- event_loop.py	2001/04/25 19:07:30	1.2
+++ event_loop.py	2001/05/01 11:44:48	1.3
@@ -20,74 +20,74 @@
 
 class event_loop:
 
-	def __init__ (self):
-		self.events = []
-		self.num_channels = 0
-		self.max_channels = 0
-
-	def go (self, timeout=30.0, granularity=15):
-		global socket_map
-		last_event_check = 0
-		while socket_map:
-			now = int(time.time())
-			if (now - last_event_check) >= granularity:
-				last_event_check = now
-				fired = []
-				# yuck. i want my lisp.
-				i = j = 0
-				while i < len(self.events):
-					when, what = self.events[i]
-					if now >= when:
-						fired.append (what)
-						j = i + 1
-					else:
-						break
-					i = i + 1
-				if fired:
-					self.events = self.events[j:]
-					for what in fired:
-						what (self, now)
-			# sample the number of channels
-			n = len(asyncore.socket_map)
-			self.num_channels = n
-			if n > self.max_channels:
-				self.max_channels = n
-			asyncore.poll (timeout)
-			
-	def schedule (self, delta, callback):
-		now = int (time.time())
-		bisect.insort (self.events, (now + delta, callback))
-
-	def __len__ (self):
-		return len(self.events)
-
+    def __init__ (self):
+        self.events = []
+        self.num_channels = 0
+        self.max_channels = 0
+        
+    def go (self, timeout=30.0, granularity=15):
+        global socket_map
+        last_event_check = 0
+        while socket_map:
+            now = int(time.time())
+            if (now - last_event_check) >= granularity:
+                last_event_check = now
+                fired = []
+                # yuck. i want my lisp.
+                i = j = 0
+                while i < len(self.events):
+                    when, what = self.events[i]
+                    if now >= when:
+                        fired.append (what)
+                        j = i + 1
+                    else:
+                        break
+                    i = i + 1
+                if fired:
+                    self.events = self.events[j:]
+                    for what in fired:
+                        what (self, now)
+                        # sample the number of channels
+            n = len(asyncore.socket_map)
+            self.num_channels = n
+            if n > self.max_channels:
+                self.max_channels = n
+            asyncore.poll (timeout)
+            
+    def schedule (self, delta, callback):
+        now = int (time.time())
+        bisect.insort (self.events, (now + delta, callback))
+        
+    def __len__ (self):
+        return len(self.events)
+        
 class test (asyncore.dispatcher):
-	
-	def __init__ (self):
-		asyncore.dispatcher.__init__ (self)
-
-	def handle_connect (self):
-		print 'Connected!'
-
-	def writable (self):
-		return not self.connected
-
-	def connect_timeout_callback (self, event_loop, when):
-		if not self.connected:
-			print 'Timeout on connect'
-			self.close()
 
-	def periodic_thing_callback (self, event_loop, when):
-		print 'A Periodic Event has Occurred!'
-		# re-schedule it.
-		event_loop.schedule (15, self.periodic_thing_callback)
-		
+    def __init__ (self):
+        asyncore.dispatcher.__init__ (self)
+        
+    def handle_connect (self):
+        print 'Connected!'
+        
+    def writable (self):
+        return not self.connected
+        
+    def connect_timeout_callback (self, event_loop, when):
+        if not self.connected:
+            print 'Timeout on connect'
+            self.close()
+            
+    def periodic_thing_callback (self, event_loop, when):
+        print 'A Periodic Event has Occurred!'
+        # re-schedule it.
+        event_loop.schedule (15, self.periodic_thing_callback)
+        
 if __name__ == '__main__':
-	import socket
-	el = event_loop()
-	t = test ()
-	t.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-	el.schedule (10, t.connect_timeout_callback)
-	el.schedule (15, t.periodic_thing_callback)
-	t.connect (('squirl', 80))
-	el.go(1.0)
+    import socket
+    el = event_loop()
+    t = test ()
+    t.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+    el.schedule (10, t.connect_timeout_callback)
+    el.schedule (15, t.periodic_thing_callback)
+    t.connect (('squirl', 80))
+    el.go(1.0)

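schedule() keeps a bisect-sorted list of (when, callback) pairs, and go() fires any that are due each time 'granularity' seconds pass, handing each callback the loop and the current time. A minimal sketch (the heartbeat function is hypothetical):

def heartbeat (loop, now):
    # runs roughly every 30 seconds, within 'granularity'
    print 'tick at', now
    loop.schedule (30, heartbeat)    # re-arm for the next round

el = event_loop()
el.schedule (30, heartbeat)
# el.go() would then poll asyncore and fire due events; it exits as
# soon as asyncore's socket_map is empty, so open a channel first.
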
--- Updated File fifo.py in package Zope2 --
--- fifo.py	2001/04/25 19:07:30	1.2
+++ fifo.py	2001/05/01 11:44:48	1.3
@@ -2,202 +2,202 @@
 
 # fifo, implemented with lisp-style pairs.
 # [quick translation of scheme48/big/queue.scm]
-	
-class fifo:
-
-	def __init__ (self):
-		self.head, self.tail = None, None
-		self.length = 0
-		self.node_cache = None
-		
-	def __len__ (self):
-		return self.length
-
-	def push (self, v):
-		self.node_cache = None
-		self.length = self.length + 1
-		p = [v, None]
-		if self.head is None:
-			self.head = p
-		else:
-			self.tail[1] = p
-		self.tail = p
-
-	def pop (self):
-		self.node_cache = None
-		pair = self.head
-		if pair is None:
-			raise ValueError, "pop() from an empty queue"
-		else:
-			self.length = self.length - 1
-			[value, next] = pair
-			self.head = next
-			if next is None:
-				self.tail = None
-			return value
-
-	def first (self):
-		if self.head is None:
-			raise ValueError, "first() of an empty queue"
-		else:
-			return self.head[0]
-
-	def push_front (self, thing):
-		self.node_cache = None
-		self.length = self.length + 1
-		old_head = self.head
-		new_head = [thing, old_head]
-		self.head = new_head
-		if old_head is None:
-			self.tail = new_head
 
-	def _nth (self, n):
-		i = n
-		h = self.head
-		while i:
-			h = h[1]
-			i = i - 1
-		self.node_cache = n, h[1]
-		return h[0]
-
-	def __getitem__ (self, index):
-		if (index < 0) or (index >= self.length):
-			raise IndexError, "index out of range"
-		else:
-			if self.node_cache:
-				j, h = self.node_cache
-				if j == index - 1:
-					result = h[0]
-					self.node_cache = index, h[1]
-					return result
-				else:
-					return self._nth (index)
-			else:
-				return self._nth (index)
+class fifo:
 
-			
+    def __init__ (self):
+        self.head, self.tail = None, None
+        self.length = 0
+        self.node_cache = None
+        
+    def __len__ (self):
+        return self.length
+        
+    def push (self, v):
+        self.node_cache = None
+        self.length = self.length + 1
+        p = [v, None]
+        if self.head is None:
+            self.head = p
+        else:
+            self.tail[1] = p
+        self.tail = p
+        
+    def pop (self):
+        self.node_cache = None
+        pair = self.head
+        if pair is None:
+            raise ValueError, "pop() from an empty queue"
+        else:
+            self.length = self.length - 1
+            [value, next] = pair
+            self.head = next
+            if next is None:
+                self.tail = None
+            return value
+            
+    def first (self):
+        if self.head is None:
+            raise ValueError, "first() of an empty queue"
+        else:
+            return self.head[0]
+            
+    def push_front (self, thing):
+        self.node_cache = None
+        self.length = self.length + 1
+        old_head = self.head
+        new_head = [thing, old_head]
+        self.head = new_head
+        if old_head is None:
+            self.tail = new_head
+            
+    def _nth (self, n):
+        i = n
+        h = self.head
+        while i:
+            h = h[1]
+            i = i - 1
+        self.node_cache = n, h[1]
+        return h[0]
+        
+    def __getitem__ (self, index):
+        if (index < 0) or (index >= self.length):
+            raise IndexError, "index out of range"
+        else:
+            if self.node_cache:
+                j, h = self.node_cache
+                if j == index - 1:
+                    result = h[0]
+                    self.node_cache = index, h[1]
+                    return result
+                else:
+                    return self._nth (index)
+            else:
+                return self._nth (index)
+                
+                
 class protected_fifo:
-	
-	def __init__ (self, lock=None):
-		if lock is None:
-			import thread
-			self.lock = thread.allocate_lock()
-		else:
-			self.lock = lock
-		self.fifo = fifo()
-
-	def push (self, item):
-		try:
-			self.lock.acquire()
-			self.fifo.push (item)
-		finally:
-			self.lock.release()
-
-	enqueue = push
-
-	def pop (self):
-		try:
-			self.lock.acquire()
-			return self.fifo.pop()
-		finally:
-			self.lock.release()
 
-	dequeue = pop
-	
-	def __len__ (self):
-		try:
-			self.lock.acquire()
-			return len(self.fifo)
-		finally:
-			self.lock.release()
-
+    def __init__ (self, lock=None):
+        if lock is None:
+            import thread
+            self.lock = thread.allocate_lock()
+        else:
+            self.lock = lock
+        self.fifo = fifo()
+        
+    def push (self, item):
+        try:
+            self.lock.acquire()
+            self.fifo.push (item)
+        finally:
+            self.lock.release()
+            
+    enqueue = push
+    
+    def pop (self):
+        try:
+            self.lock.acquire()
+            return self.fifo.pop()
+        finally:
+            self.lock.release()
+            
+    dequeue = pop
+    
+    def __len__ (self):
+        try:
+            self.lock.acquire()
+            return len(self.fifo)
+        finally:
+            self.lock.release()
+            
 class output_fifo:
-	
-	EMBEDDED	= 'embedded'
-	EOF			= 'eof'
-	TRIGGER		= 'trigger'
-
-	def __init__ (self):
-		# containment, not inheritance
-		self.fifo = fifo()
-		self._embedded = None
-
-	def push_embedded (self, fifo):
-		# push embedded fifo
-		fifo.parent = self # CYCLE
-		self.fifo.push ((self.EMBEDDED, fifo))
-
-	def push_eof (self):
-		# push end-of-fifo
-		self.fifo.push ((self.EOF, None))
-
-	def push_trigger (self, thunk):
-		self.fifo.push ((self.TRIGGER, thunk))
-
-	def push (self, item):
-		# item should be a producer or string
-		self.fifo.push (item)
-
-	# 'length' is an inaccurate term.  we should
-	# probably use an 'empty' method instead.
-	def __len__ (self):
-		if self._embedded is None:
-			return len(self.fifo)
-		else:
-			return len(self._embedded)
-
-	def empty (self):
-		return len(self) == 0
-
-	def first (self):
-		if self._embedded is None:
-			return self.fifo.first()
-		else:
-			return self._embedded.first()
-
-	def pop (self):
-		if self._embedded is not None:
-			return self._embedded.pop()
-		else:
-			result = self.fifo.pop()
-			# unset self._embedded
-			self._embedded = None
-			# check for special items in the front
-			if len(self.fifo):
-				front = self.fifo.first()
-				if type(front) is type(()):
-					# special
-					kind, value = front
-					if kind is self.EMBEDDED:
-						self._embedded = value
-					elif kind is self.EOF:
-						# break the cycle
-						parent = self.parent
-						self.parent = None
-						# pop from parent
-						parent._embedded = None
-					elif kind is self.TRIGGER:
-						# call the trigger thunk
-						value()
-					# remove the special
-					self.fifo.pop()
-			# return the originally popped result
-			return result
 
+    EMBEDDED = 'embedded'
+    EOF      = 'eof'
+    TRIGGER  = 'trigger'
+    
+    def __init__ (self):
+        # containment, not inheritance
+        self.fifo = fifo()
+        self._embedded = None
+        
+    def push_embedded (self, fifo):
+        # push embedded fifo
+        fifo.parent = self # CYCLE
+        self.fifo.push ((self.EMBEDDED, fifo))
+        
+    def push_eof (self):
+        # push end-of-fifo
+        self.fifo.push ((self.EOF, None))
+        
+    def push_trigger (self, thunk):
+        self.fifo.push ((self.TRIGGER, thunk))
+        
+    def push (self, item):
+        # item should be a producer or string
+        self.fifo.push (item)
+        
+    # 'length' is an inaccurate term.  we should
+    # probably use an 'empty' method instead.
+    def __len__ (self):
+        if self._embedded is None:
+            return len(self.fifo)
+        else:
+            return len(self._embedded)
+            
+    def empty (self):
+        return len(self) == 0
+        
+    def first (self):
+        if self._embedded is None:
+            return self.fifo.first()
+        else:
+            return self._embedded.first()
+            
+    def pop (self):
+        if self._embedded is not None:
+            return self._embedded.pop()
+        else:
+            result = self.fifo.pop()
+            # unset self._embedded
+            self._embedded = None
+            # check for special items in the front
+            if len(self.fifo):
+                front = self.fifo.first()
+                if type(front) is type(()):
+                    # special
+                    kind, value = front
+                    if kind is self.EMBEDDED:
+                        self._embedded = value
+                    elif kind is self.EOF:
+                        # break the cycle
+                        parent = self.parent
+                        self.parent = None
+                        # pop from parent
+                        parent._embedded = None
+                    elif kind is self.TRIGGER:
+                        # call the trigger thunk
+                        value()
+                    # remove the special
+                    self.fifo.pop()
+            # return the originally popped result
+            return result
+            
 def test_embedded():
-	of = output_fifo()
-	f2 = output_fifo()
-	f3 = output_fifo()
-	of.push ('one')
-	of.push_embedded (f2)
-	f2.push ('two')
-	f3.push ('three')
-	f3.push ('four')
-	f2.push_embedded (f3)
-	f3.push_eof()
-	f2.push ('five')
-	f2.push_eof()
-	of.push ('six')
-	of.push ('seven')
-	while 1:
-		print of.pop()
+    of = output_fifo()
+    f2 = output_fifo()
+    f3 = output_fifo()
+    of.push ('one')
+    of.push_embedded (f2)
+    f2.push ('two')
+    f3.push ('three')
+    f3.push ('four')
+    f2.push_embedded (f3)
+    f3.push_eof()
+    f2.push ('five')
+    f2.push_eof()
+    of.push ('six')
+    of.push ('seven')
+    while 1:
+        print of.pop()
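+
+# If run, test_embedded() should print 'one' through 'seven' in order
+# (the embedded fifos splice into the stream transparently) and then
+# die with an error once the outer fifo is exhausted.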

--- Updated File filesys.py in package Zope2 --
--- filesys.py	2001/04/27 18:25:42	1.8
+++ filesys.py	2001/05/01 11:44:48	1.9
@@ -15,450 +15,450 @@
 # return a producer.
 
 class abstract_filesystem:
-	def __init__ (self):
-		pass
-
-	def current_directory (self):
-		"Return a string representing the current directory."
-		pass
-
-	def listdir (self, path, long=0):
-		"""Return a listing of the directory at 'path' The empty string
-		indicates the current directory.  If 'long' is set, instead
-		return a list of (name, stat_info) tuples
-		"""
-		pass
-
-	def open (self, path, mode):
-		"Return an open file object"
-		pass
-
-	def stat (self, path):
-		"Return the equivalent of os.stat() on the given path."
-		pass
-
-	def isdir (self, path):
-		"Does the path represent a directory?"
-		pass
-
-	def isfile (self, path):
-		"Does the path represent a plain file?"
-		pass
-
-	def cwd (self, path):
-		"Change the working directory."
-		pass
-
-	def cdup (self):
-		"Change to the parent of the current directory."
-		pass
-
-
-	def longify (self, path):
-		"""Return a 'long' representation of the filename
-		[for the output of the LIST command]"""
-		pass
-
-# standard wrapper around a unix-like filesystem, with a 'false root'
-# capability.
-
-# security considerations: can symbolic links be used to 'escape' the
-# root?  should we allow it?  if not, then we could scan the
-# filesystem on startup, but that would not help if they were added
-# later.  We will probably need to check for symlinks in the cwd method.
-
-# what to do if wd is an invalid directory?
-
+    def __init__ (self):
+        pass
+        
+    def current_directory (self):
+        "Return a string representing the current directory."
+        pass
+        
+    def listdir (self, path, long=0):
+        """Return a listing of the directory at 'path' The empty string
+        indicates the current directory.  If 'long' is set, instead
+        return a list of (name, stat_info) tuples
+        """
+        pass
+        
+    def open (self, path, mode):
+        "Return an open file object"
+        pass
+        
+    def stat (self, path):
+        "Return the equivalent of os.stat() on the given path."
+        pass
+        
+    def isdir (self, path):
+        "Does the path represent a directory?"
+        pass
+        
+    def isfile (self, path):
+        "Does the path represent a plain file?"
+        pass
+        
+    def cwd (self, path):
+        "Change the working directory."
+        pass
+        
+    def cdup (self):
+        "Change to the parent of the current directory."
+        pass
+        
+        
+    def longify (self, path):
+        """Return a 'long' representation of the filename
+        [for the output of the LIST command]"""
+        pass
+        
+# standard wrapper around a unix-like filesystem, with a 'false root'
+# capability.
+
+# security considerations: can symbolic links be used to 'escape' the
+# root?  should we allow it?  if not, then we could scan the
+# filesystem on startup, but that would not help if they were added
+# later.  We will probably need to check for symlinks in the cwd method.
+
+# what to do if wd is an invalid directory?
+
 import os,re
 import stat
 import string
 
 def safe_stat (path):
-	try:
-		return (path, os.stat (path))
-	except:
-		return None
-
+    try:
+        return (path, os.stat (path))
+    except:
+        return None
+        
 import glob
 
 class os_filesystem:
-	path_module = os.path
-
-	# set this to zero if you want to disable pathname globbing.
-	# [we currently don't glob, anyway]
-	do_globbing = 1
-
-	def __init__ (self, root, wd='/'):
-		self.root = root
-		self.wd = wd
-
-	def current_directory (self):
-		return self.wd
-
-	def isfile (self, path):
-		p = self.normalize (self.path_module.join (self.wd, path))
-		return self.path_module.isfile (self.translate(p))
-
-	def isdir (self, path):
-		p = self.normalize (self.path_module.join (self.wd, path))
-		return self.path_module.isdir (self.translate(p))
-
-	def cwd (self, path):
-		p = self.normalize (self.path_module.join (self.wd, path))
-		translated_path = self.translate(p)
-		if not self.path_module.isdir (translated_path):
-			return 0
-		else:
-			old_dir = os.getcwd()
-			# temporarily change to that directory, in order
-			# to see if we have permission to do so.
-			try:
-				can = 0
-				try:
-					os.chdir (translated_path)
-					can = 1
-					self.wd = p
-				except:
-					pass
-			finally:
-				if can:
-					os.chdir (old_dir)
-			return can
-
-	def cdup (self):
-		return self.cwd ('..')
-
-	def listdir (self, path, long=0):
-		p = self.translate (path)
-		# I think we should glob, but limit it to the current
-		# directory only.
-		ld = os.listdir (p)
-		if not long:
-			return list_producer (ld, 0, None)
-		else:
-			old_dir = os.getcwd()
-			try:
-				os.chdir (p)
-				# if os.stat fails we ignore that file.
-				result = filter (None, map (safe_stat, ld))
-			finally:
-				os.chdir (old_dir)
-			return list_producer (result, 1, self.longify)
-
-	# TODO: implement a cache w/timeout for stat()
-	def stat (self, path):
-		p = self.translate (path)
-		return os.stat (p)
-
-	def open (self, path, mode):
-		p = self.translate (path)
-		return open (p, mode)
-
-	def unlink (self, path):
-		p = self.translate (path)
-		return os.unlink (p)
-
-	def mkdir (self, path):
-		p = self.translate (path)
-		return os.mkdir (p)
-
-	def rmdir (self, path):
-		p = self.translate (path)
-		return os.rmdir (p)
-
-	# utility methods
-	def normalize (self, path):
-		# watch for the ever-sneaky '/+' path element
-		path = re.sub ('/+', '/', path)
-		p = self.path_module.normpath (path)
-		# remove 'dangling' cdup's.
-		if len(p) > 2 and p[:3] == '/..':
-			p = '/'
-		return p
-
-	def translate (self, path):
-		# we need to join together three separate
-		# path components, and do it safely.
-		# <real_root>/<current_directory>/<path>
-		# use the operating system's path separator.
-		path = string.join (string.split (path, '/'), os.sep)
-		p = self.normalize (self.path_module.join (self.wd, path))
-		p = self.normalize (self.path_module.join (self.root, p[1:]))
-		return p
-
-	def longify (self, (path, stat_info)):
-		return unix_longify (path, stat_info)
-
-	def __repr__ (self):
-		return '<unix-style fs root:%s wd:%s>' % (
-			self.root,
-			self.wd
-			)
-
+    path_module = os.path
+    
+    # set this to zero if you want to disable pathname globbing.
+    # [we currently don't glob, anyway]
+    do_globbing = 1
+    
+    def __init__ (self, root, wd='/'):
+        self.root = root
+        self.wd = wd
+        
+    def current_directory (self):
+        return self.wd
+        
+    def isfile (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        return self.path_module.isfile (self.translate(p))
+        
+    def isdir (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        return self.path_module.isdir (self.translate(p))
+        
+    def cwd (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        translated_path = self.translate(p)
+        if not self.path_module.isdir (translated_path):
+            return 0
+        else:
+            old_dir = os.getcwd()
+            # temporarily change to that directory, in order
+            # to see if we have permission to do so.
+            try:
+                can = 0
+                try:
+                    os.chdir (translated_path)
+                    can = 1
+                    self.wd = p
+                except:
+                    pass
+            finally:
+                if can:
+                    os.chdir (old_dir)
+            return can
+            
+    def cdup (self):
+        return self.cwd ('..')
+        
+    def listdir (self, path, long=0):
+        p = self.translate (path)
+        # I think we should glob, but limit it to the current
+        # directory only.
+        ld = os.listdir (p)
+        if not long:
+            return list_producer (ld, 0, None)
+        else:
+            old_dir = os.getcwd()
+            try:
+                os.chdir (p)
+                # if os.stat fails we ignore that file.
+                result = filter (None, map (safe_stat, ld))
+            finally:
+                os.chdir (old_dir)
+            return list_producer (result, 1, self.longify)
+            
+    # TODO: implement a cache w/timeout for stat()
+    def stat (self, path):
+        p = self.translate (path)
+        return os.stat (p)
+        
+    def open (self, path, mode):
+        p = self.translate (path)
+        return open (p, mode)
+        
+    def unlink (self, path):
+        p = self.translate (path)
+        return os.unlink (p)
+        
+    def mkdir (self, path):
+        p = self.translate (path)
+        return os.mkdir (p)
+        
+    def rmdir (self, path):
+        p = self.translate (path)
+        return os.rmdir (p)
+        
+    # utility methods
+    def normalize (self, path):
+        # watch for the ever-sneaky '/+' path element
+        path = re.sub ('/+', '/', path)
+        p = self.path_module.normpath (path)
+        # remove 'dangling' cdup's.
+        if len(p) > 2 and p[:3] == '/..':
+            p = '/'
+        return p
+        
+    def translate (self, path):
+        # we need to join together three separate
+        # path components, and do it safely.
+        # <real_root>/<current_directory>/<path>
+        # use the operating system's path separator.
+        path = string.join (string.split (path, '/'), os.sep)
+        p = self.normalize (self.path_module.join (self.wd, path))
+        p = self.normalize (self.path_module.join (self.root, p[1:]))
+        return p
+        
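+    # A worked example (illustrative values, not from the source): with
+    # root='/home/ftp' and wd='/pub', translate('dist/../README') joins
+    # and normalizes to '/pub/README', then grafts it under the false
+    # root to give '/home/ftp/pub/README'.
+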
+    def longify (self, (path, stat_info)):
+        return unix_longify (path, stat_info)
+        
+    def __repr__ (self):
+        return '<unix-style fs root:%s wd:%s>' % (
+                self.root,
+                self.wd
+                )
+        
 if os.name == 'posix':
-
-	class unix_filesystem (os_filesystem):
-		pass
-
-	class schizophrenic_unix_filesystem (os_filesystem):
-		PROCESS_UID		= os.getuid()
-		PROCESS_EUID	= os.geteuid()
-		PROCESS_GID		= os.getgid()
-		PROCESS_EGID	= os.getegid()
-
-		def __init__ (self, root, wd='/', persona=(None, None)):
-			os_filesystem.__init__ (self, root, wd)
-			self.persona = persona
-
-		def become_persona (self):
-			if self.persona is not (None, None):
-				uid, gid = self.persona
-				# the order of these is important!
-				os.setegid (gid)
-				os.seteuid (uid)
-
-		def become_nobody (self):
-			if self.persona is not (None, None):
-				os.seteuid (self.PROCESS_UID)
-				os.setegid (self.PROCESS_GID)
-
-		# cwd, cdup, open, listdir
-		def cwd (self, path):
-			try:
-				self.become_persona()
-				return os_filesystem.cwd (self, path)
-			finally:
-				self.become_nobody()
-
-		def cdup (self, path):
-			try:
-				self.become_persona()
-				return os_filesystem.cdup (self)
-			finally:
-				self.become_nobody()
-
-		def open (self, filename, mode):
-			try:
-				self.become_persona()
-				return os_filesystem.open (self, filename, mode)
-			finally:
-				self.become_nobody()
-
-		def listdir (self, path, long=0):
-			try:
-				self.become_persona()
-				return os_filesystem.listdir (self, path, long)
-			finally:
-				self.become_nobody()
-
-# This hasn't been very reliable across different platforms.
-# maybe think about a separate 'directory server'.
-#
-#	import posixpath
-#	import fcntl
-#	import FCNTL
-#	import select
-#	import asyncore
-#
-#	# pipes /bin/ls for directory listings.
-#	class unix_filesystem (os_filesystem):
-#		pass
-# 		path_module = posixpath
-#
-# 		def listdir (self, path, long=0):
-# 			p = self.translate (path)
-# 			if not long:
-# 				return list_producer (os.listdir (p), 0, None)
-# 			else:
-# 				command = '/bin/ls -l %s' % p
-# 				print 'opening pipe to "%s"' % command
-# 				fd = os.popen (command, 'rt')
-# 				return pipe_channel (fd)
-#
-# 	# this is both a dispatcher, _and_ a producer
-# 	class pipe_channel (asyncore.file_dispatcher):
-# 		buffer_size = 4096
-#
-# 		def __init__ (self, fd):
-# 			asyncore.file_dispatcher.__init__ (self, fd)
-# 			self.fd = fd
-# 			self.done = 0
-# 			self.data = ''
-#
-# 		def handle_read (self):
-# 			if len (self.data) < self.buffer_size:
-# 				self.data = self.data + self.fd.read (self.buffer_size)
-# 			#print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
-#
-# 		def handle_expt (self):
-# 			#print '%s.handle_expt()' % self
-# 			self.done = 1
-#
-# 		def ready (self):
-# 			#print '%s.ready() => %d' % (self, len(self.data))
-# 			return ((len (self.data) > 0) or self.done)
-#
-# 		def more (self):
-# 			if self.data:
-# 				r = self.data
-# 				self.data = ''
-# 			elif self.done:
-# 				self.close()
-# 				self.downstream.finished()
-# 				r = ''
-# 			else:
-# 				r = None
-# 			#print '%s.more() => %s' % (self, (r and len(r)))
-# 			return r
 
-# For the 'real' root, we could obtain a list of drives, and then
-# use that.  Doesn't win32 provide such a 'real' filesystem?
-# [yes, I think something like this "\\.\c\windows"]
-
+    class unix_filesystem (os_filesystem):
+        pass
+        
+    class schizophrenic_unix_filesystem (os_filesystem):
+        PROCESS_UID  = os.getuid()
+        PROCESS_EUID = os.geteuid()
+        PROCESS_GID  = os.getgid()
+        PROCESS_EGID = os.getegid()
+        
+        def __init__ (self, root, wd='/', persona=(None, None)):
+            os_filesystem.__init__ (self, root, wd)
+            self.persona = persona
+            
+        def become_persona (self):
+            # compare by value ('is not' on a fresh tuple is always true)
+            if self.persona != (None, None):
+                uid, gid = self.persona
+                # the order of these is important!
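+                # (gid first: once the effective uid drops privilege,
+                # the process may no longer be allowed to change gids)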
+                os.setegid (gid)
+                os.seteuid (uid)
+                
+        def become_nobody (self):
+            if self.persona != (None, None):
+                os.seteuid (self.PROCESS_UID)
+                os.setegid (self.PROCESS_GID)
+                
+        # cwd, cdup, open, listdir
+        def cwd (self, path):
+            try:
+                self.become_persona()
+                return os_filesystem.cwd (self, path)
+            finally:
+                self.become_nobody()
+                
+        def cdup (self, path):
+            try:
+                self.become_persona()
+                return os_filesystem.cdup (self)
+            finally:
+                self.become_nobody()
+                
+        def open (self, filename, mode):
+            try:
+                self.become_persona()
+                return os_filesystem.open (self, filename, mode)
+            finally:
+                self.become_nobody()
+                
+        def listdir (self, path, long=0):
+            try:
+                self.become_persona()
+                return os_filesystem.listdir (self, path, long)
+            finally:
+                self.become_nobody()
+                
+# This hasn't been very reliable across different platforms.
+# maybe think about a separate 'directory server'.
+#
+#    import posixpath
+#    import fcntl
+#    import FCNTL
+#    import select
+#    import asyncore
+#
+#    # pipes /bin/ls for directory listings.
+#    class unix_filesystem (os_filesystem):
+#        pass
+#        path_module = posixpath
+#
+#        def listdir (self, path, long=0):
+#            p = self.translate (path)
+#            if not long:
+#                return list_producer (os.listdir (p), 0, None)
+#            else:
+#                command = '/bin/ls -l %s' % p
+#                print 'opening pipe to "%s"' % command
+#                fd = os.popen (command, 'rt')
+#                return pipe_channel (fd)
+#
+#    # this is both a dispatcher, _and_ a producer
+#    class pipe_channel (asyncore.file_dispatcher):
+#        buffer_size = 4096
+#
+#        def __init__ (self, fd):
+#            asyncore.file_dispatcher.__init__ (self, fd)
+#            self.fd = fd
+#            self.done = 0
+#            self.data = ''
+#
+#        def handle_read (self):
+#            if len (self.data) < self.buffer_size:
+#                self.data = self.data + self.fd.read (self.buffer_size)
+#            #print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
+#
+#        def handle_expt (self):
+#            #print '%s.handle_expt()' % self
+#            self.done = 1
+#
+#        def ready (self):
+#            #print '%s.ready() => %d' % (self, len(self.data))
+#            return ((len (self.data) > 0) or self.done)
+#
+#        def more (self):
+#            if self.data:
+#                r = self.data
+#                self.data = ''
+#            elif self.done:
+#                self.close()
+#                self.downstream.finished()
+#                r = ''
+#            else:
+#                r = None
+#            #print '%s.more() => %s' % (self, (r and len(r)))
+#            return r
+
+# For the 'real' root, we could obtain a list of drives, and then
+# use that.  Doesn't win32 provide such a 'real' filesystem?
+# [yes, I think something like this "\\.\c\windows"]
+
 class msdos_filesystem (os_filesystem):
-	def longify (self, (path, stat_info)):
-		return msdos_longify (path, stat_info)
-
-# A merged filesystem will let you plug other filesystems together.
-# We really need the equivalent of a 'mount' capability - this seems
-# to be the most general idea.  So you'd use a 'mount' method to place
-# another filesystem somewhere in the hierarchy.
-
-# Note: this is most likely how I will handle ~user directories
-# with the http server.
-
+    def longify (self, (path, stat_info)):
+        return msdos_longify (path, stat_info)
+        
+# A merged filesystem will let you plug other filesystems together.
+# We really need the equivalent of a 'mount' capability - this seems
+# to be the most general idea.  So you'd use a 'mount' method to place
+# another filesystem somewhere in the hierarchy.
+
+# Note: this is most likely how I will handle ~user directories
+# with the http server.
+
 class merged_filesystem:
-	def __init__ (self, *fsys):
-		pass
-
-# this matches the output of NT's ftp server (when in
-# MSDOS mode) exactly.
-
+    def __init__ (self, *fsys):
+        pass
+        
+# this matches the output of NT's ftp server (when in
+# MSDOS mode) exactly.
+
 def msdos_longify (file, stat_info):
-	if stat.S_ISDIR (stat_info[stat.ST_MODE]):
-		dir = '<DIR>'
-	else:
-		dir = '     '
-	date = msdos_date (stat_info[stat.ST_MTIME])
-	return '%s       %s %8d %s' % (
-		date,
-		dir,
-		stat_info[stat.ST_SIZE],
-		file
-		)
-
+    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+        dir = '<DIR>'
+    else:
+        dir = '     '
+    date = msdos_date (stat_info[stat.ST_MTIME])
+    return '%s       %s %8d %s' % (
+            date,
+            dir,
+            stat_info[stat.ST_SIZE],
+            file
+            )
+    
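+# For illustration (made-up stat values): a directory last modified
+# 1995-10-19 17:33 UTC would come out of msdos_longify roughly as
+#     10-19-95  05:33PM       <DIR>        0 pub
+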
 def msdos_date (t):
-	try:
-		info = time.gmtime (t)
-	except:
-		info = time.gmtime (0)
-	# year, month, day, hour, minute, second, ...
-	if info[3] > 11:
-		merid = 'PM'
-		info[3] = info[3] - 12
-	else:
-		merid = 'AM'
-	return '%02d-%02d-%02d  %02d:%02d%s' % (
-		info[1],
-		info[2],
-		info[0]%100,
-		info[3],
-		info[4],
-		merid
-		)
-
+    try:
+        info = time.gmtime (t)
+    except:
+        info = time.gmtime (0)
+    # year, month, day, hour, minute, second, ...
+    # (time tuples are immutable, so adjust a local copy of the hour)
+    hour = info[3]
+    if hour > 11:
+        merid = 'PM'
+        hour = hour - 12
+    else:
+        merid = 'AM'
+    return '%02d-%02d-%02d  %02d:%02d%s' % (
+            info[1],
+            info[2],
+            info[0]%100,
+            hour,
+            info[4],
+            merid
+            )
+    
 months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
-		  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
 
 mode_table = {
-	'0':'---',
-	'1':'--x',
-	'2':'-w-',
-	'3':'-wx',
-	'4':'r--',
-	'5':'r-x',
-	'6':'rw-',
-	'7':'rwx'
-	}
+        '0':'---',
+        '1':'--x',
+        '2':'-w-',
+        '3':'-wx',
+        '4':'r--',
+        '5':'r-x',
+        '6':'rw-',
+        '7':'rwx'
+        }
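+
+# e.g. an octal mode string of '755' expands digit by digit through
+# mode_table to 'rwx' + 'r-x' + 'r-x', i.e. 'rwxr-xr-x' (used by
+# unix_longify below).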
 
 import time
 
 def unix_longify (file, stat_info):
-	# for now, only pay attention to the lower bits
-	mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
-	mode = string.join (map (lambda x: mode_table[x], mode), '')
-	if stat.S_ISDIR (stat_info[stat.ST_MODE]):
-		dirchar = 'd'
-	else:
-		dirchar = '-'
-	date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
-	return '%s%s %3d %-8s %-8s %8d %s %s' % (
-		dirchar,
-		mode,
-		stat_info[stat.ST_NLINK],
-		stat_info[stat.ST_UID],
-		stat_info[stat.ST_GID],
-		stat_info[stat.ST_SIZE],
-		date,
-		file
-		)
-		
-# Emulate the unix 'ls' command's date field.
-# it has two formats - if the date is more than 180
-# days in the past, then it's like this:
-# Oct 19  1995
-# otherwise, it looks like this:
-# Oct 19 17:33
-
+    # for now, only pay attention to the lower bits
+    mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
+    mode = string.join (map (lambda x: mode_table[x], mode), '')
+    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+        dirchar = 'd'
+    else:
+        dirchar = '-'
+    date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
+    return '%s%s %3d %-8s %-8s %8d %s %s' % (
+            dirchar,
+            mode,
+            stat_info[stat.ST_NLINK],
+            stat_info[stat.ST_UID],
+            stat_info[stat.ST_GID],
+            stat_info[stat.ST_SIZE],
+            date,
+            file
+            )
+    
+# Emulate the unix 'ls' command's date field.
+# it has two formats - if the date is more than 180
+# days in the past, then it's like this:
+# Oct 19  1995
+# otherwise, it looks like this:
+# Oct 19 17:33
+
 def ls_date (now, t):
-	try:
-		info = time.gmtime (t)
-	except:
-		info = time.gmtime (0)
-	# 15,600,000 == 86,400 * 180
-	if (now - t) > 15600000:
-		return '%s %2d  %d' % (
-			months[info[1]-1],
-			info[2],
-			info[0]
-			)
-	else:
-		return '%s %2d %02d:%02d' % (
-			months[info[1]-1],
-			info[2],
-			info[3],
-			info[4]
-			)
-
-# ===========================================================================
-# Producers
-# ===========================================================================
-
+    try:
+        info = time.gmtime (t)
+    except:
+        info = time.gmtime (0)
+    # 15,600,000 == 86,400 * 180
+    if (now - t) > 15600000:
+        return '%s %2d  %d' % (
+                months[info[1]-1],
+                info[2],
+                info[0]
+                )
+    else:
+        return '%s %2d %02d:%02d' % (
+                months[info[1]-1],
+                info[2],
+                info[3],
+                info[4]
+                )
+        
+# ===========================================================================
+# Producers
+# ===========================================================================
+
 class list_producer:
-	def __init__ (self, file_list, long, longify):
-		self.file_list = file_list
-		self.long = long
-		self.longify = longify
-		self.done = 0
-
-	def ready (self):
-		if len(self.file_list):
-			return 1
-		else:
-			if not self.done:
-				self.done = 1
-			return 0
-		return (len(self.file_list) > 0)
-
-	# this should do a pushd/popd
-	def more (self):
-		if not self.file_list:
-			return ''
-		else:
-			# do a few at a time
-			bunch = self.file_list[:50]
-			if self.long:
-				bunch = map (self.longify, bunch)
-			self.file_list = self.file_list[50:]
-			return string.joinfields (bunch, '\r\n') + '\r\n'
-
+    def __init__ (self, file_list, long, longify):
+        self.file_list = file_list
+        self.long = long
+        self.longify = longify
+        self.done = 0
+        
+    def ready (self):
+        if len(self.file_list):
+            return 1
+        else:
+            if not self.done:
+                self.done = 1
+            return 0
+
+    # this should do a pushd/popd
+    def more (self):
+        if not self.file_list:
+            return ''
+        else:
+            # do a few at a time
+            bunch = self.file_list[:50]
+            if self.long:
+                bunch = map (self.longify, bunch)
+            self.file_list = self.file_list[50:]
+            return string.joinfields (bunch, '\r\n') + '\r\n'
+            
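+    # note: each call to more() drains at most 50 names, CRLF-joined,
+    # so a large directory streams out in bounded chunks rather than
+    # one big string.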

--- Updated File ftp_server.py in package Zope2 --
--- ftp_server.py	2001/04/27 18:26:51	1.16
+++ ftp_server.py	2001/05/01 11:44:48	1.17
@@ -58,1071 +58,1071 @@
 
 class ftp_channel (asynchat.async_chat):
 
-	# defaults for a reliable __repr__
-	addr = ('unknown','0')
-
-	# unset this in a derived class in order
-	# to enable the commands in 'self.write_commands'
-	read_only = 1
-	write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']
-
-	restart_position = 0
-
-	# comply with (possibly troublesome) RFC959 requirements
-	# This is necessary to correctly run an active data connection
-	# through a firewall that triggers on the source port (expected
-	# to be 'L-1', or 20 in the normal case).
-	bind_local_minus_one = 0
-
-	def __init__ (self, server, conn, addr):
-		self.server = server
-		self.current_mode = 'a'
-		self.addr = addr
-		asynchat.async_chat.__init__ (self, conn)
-		self.set_terminator ('\r\n')
-
-		# client data port.  Defaults to 'the same as the control connection'.
-		self.client_addr = (addr[0], 21)
-
-		self.client_dc = None
-		self.in_buffer = ''
-		self.closing = 0
-		self.passive_acceptor = None
-		self.passive_connection = None
-		self.filesystem = None
-		self.authorized = 0
-		# send the greeting
-		self.respond (
-			'220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
-				self.server.hostname,
-				VERSION
-				)
-			)
-
-#	def __del__ (self):
-#		print 'ftp_channel.__del__()'
-
-	# --------------------------------------------------
-	# async-library methods
-	# --------------------------------------------------
-
-	def handle_expt (self):
-		# this is handled below.  not sure what I could
-		# do here to make that code less kludgish.
-		pass
-
-	def collect_incoming_data (self, data):
-		self.in_buffer = self.in_buffer + data
-		if len(self.in_buffer) > 4096:
-			# silently truncate really long lines
-			# (possible denial-of-service attack)
-			self.in_buffer = ''
-
-	def found_terminator (self):
-
-		line = self.in_buffer
-
-		if not len(line):
-			return
-
-		sp = string.find (line, ' ')
-		if sp != -1:
-			line = [line[:sp], line[sp+1:]]
-		else:
-			line = [line]
-
-		command = string.lower (line[0])
-		# watch especially for 'urgent' abort commands.
-		if string.find (command, 'abor') != -1:
-			# strip off telnet sync chars and the like...
-			while command and command[0] not in string.letters:
-				command = command[1:]
-		fun_name = 'cmd_%s' % command
-		if command != 'pass':
-			self.log ('<== %s' % repr(self.in_buffer)[1:-1])
-		else:
-			self.log ('<== %s' % line[0]+' <password>')
-		self.in_buffer = ''
-		if not hasattr (self, fun_name):
-			self.command_not_understood (line[0])
-			return
-		fun = getattr (self, fun_name)
-		if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
-			self.respond ('530 Please log in with USER and PASS')
-		elif (not self.check_command_authorization (command)):
-			self.command_not_authorized (command)
-		else:
-			try:
-				result = apply (fun, (line,))
-			except:
-				self.server.total_exceptions.increment()
-				(file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
-				if self.client_dc:
-					try:
-						self.client_dc.close()
-					except:
-						pass
-				self.respond (
-					'451 Server Error: %s, %s: file: %s line: %s' % (
-						t,v,file,line,
-						)
-					)
-
-	closed = 0
-	def close (self):
-		if not self.closed:
-			self.closed = 1
-			if self.passive_acceptor:
-				self.passive_acceptor.close()
-			if self.client_dc:
-				self.client_dc.close()
-			self.server.closed_sessions.increment()
-			asynchat.async_chat.close (self)
-
-	# --------------------------------------------------
-	# filesystem interface functions.
-	# override these to provide access control or perform
-	# other functions.
-	# --------------------------------------------------
-
-	def cwd (self, line):
-		return self.filesystem.cwd (line[1])
-
-	def cdup (self, line):
-		return self.filesystem.cdup()
-
-	def open (self, path, mode):
-		return self.filesystem.open (path, mode)
-
-	# returns a producer
-	def listdir (self, path, long=0):
-		return self.filesystem.listdir (path, long)
-
-	def get_dir_list (self, line, long=0):
-		# we need to scan the command line for arguments to '/bin/ls'...
-		args = line[1:]
-		path_args = []
-		for arg in args:
-			if arg[0] != '-':
-				path_args.append (arg)
-			else:
-				# ignore arguments
-				pass
-		if len(path_args) < 1:
-			dir = '.'
-		else:
-			dir = path_args[0]
-		return self.listdir (dir, long)
-
-	# --------------------------------------------------
-	# authorization methods
-	# --------------------------------------------------
-
-	def check_command_authorization (self, command):
-		if command in self.write_commands and self.read_only:
-			return 0
-		else:
-			return 1
-
-	# --------------------------------------------------
-	# utility methods
-	# --------------------------------------------------
-
-	def log (self, message):
-		self.server.logger.log (
-			self.addr[0],
-			'%d %s' % (
-				self.addr[1], message
-				)
-			)
-
-	def respond (self, resp):
-		self.log ('==> %s' % resp)
-		self.push (resp + '\r\n')
-
-	def command_not_understood (self, command):
-		self.respond ("500 '%s': command not understood." % command)
-
-	def command_not_authorized (self, command):
-		self.respond (
-			"530 You are not authorized to perform the '%s' command" % (
-				command
-				)
-			)
-
-	def make_xmit_channel (self):
-		# In PASV mode, the connection may or may _not_ have been made
-		# yet.  [although in most cases it is... FTP Explorer being
-		# the only exception I've yet seen].  This gets somewhat confusing
-		# because things may happen in any order...
-		pa = self.passive_acceptor
-		if pa:
-			if pa.ready:
-				# a connection has already been made.
-				conn, addr = self.passive_acceptor.ready
-				cdc = xmit_channel (self, addr)
-				cdc.set_socket (conn)
-				cdc.connected = 1
-				self.passive_acceptor.close()
-				self.passive_acceptor = None				
-			else:
-				# we're still waiting for a connect to the PASV port.
-				cdc = xmit_channel (self)
-		else:
-			# not in PASV mode.
-			ip, port = self.client_addr
-			cdc = xmit_channel (self, self.client_addr)
-			cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-			if self.bind_local_minus_one:
-				cdc.bind (('', self.server.port - 1))
-			try:
-				cdc.connect ((ip, port))
-			except socket.error, why:
-				self.respond ("425 Can't build data connection")
-		self.client_dc = cdc
-
-	# pretty much the same as xmit, but only right on the verge of
-	# being worth a merge.
-	def make_recv_channel (self, fd):
-		pa = self.passive_acceptor
-		if pa:
-			if pa.ready:
-				# a connection has already been made.
-				conn, addr = pa.ready
-				cdc = recv_channel (self, addr, fd)
-				cdc.set_socket (conn)
-				cdc.connected = 1
-				self.passive_acceptor.close()
-				self.passive_acceptor = None				
-			else:
-				# we're still waiting for a connect to the PASV port.
-				cdc = recv_channel (self, None, fd)
-		else:
-			# not in PASV mode.
-			ip, port = self.client_addr
-			cdc = recv_channel (self, self.client_addr, fd)
-			cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-			try:
-				cdc.connect ((ip, port))
-			except socket.error, why:
-				self.respond ("425 Can't build data connection")
-		self.client_dc = cdc
-
-	type_map = {
-		'a':'ASCII',
-		'i':'Binary',
-		'e':'EBCDIC',
-		'l':'Binary'
-		}
-
-	type_mode_map = {
-		'a':'t',
-		'i':'b',
-		'e':'b',
-		'l':'b'
-		}
-
-	# --------------------------------------------------
-	# command methods
-	# --------------------------------------------------
-
-	def cmd_type (self, line):
-		'specify data transfer type'
-		# ascii, ebcdic, image, local <byte size>
-		t = string.lower (line[1])
-		# no support for EBCDIC
-		# if t not in ['a','e','i','l']:
-		if t not in ['a','i','l']:
-			self.command_not_understood (string.join (line))
-		elif t == 'l' and (len(line) > 2 and line[2] != '8'):
-			self.respond ('504 Byte size must be 8')
-		else:
-			self.current_mode = t
-			self.respond ('200 Type set to %s.' % self.type_map[t])
-
-
-	def cmd_quit (self, line):
-		'terminate session'
-		self.respond ('221 Goodbye.')
-		self.close_when_done()
-
-	def cmd_port (self, line):
-		'specify data connection port'
-		info = string.split (line[1], ',')
-		ip = string.join (info[:4], '.')
-		port = string.atoi(info[4])*256 + string.atoi(info[5])
-		# how many data connections at a time?
-		# I'm assuming one for now...
-		# TODO: we should (optionally) verify that the
-		# ip number belongs to the client.  [wu-ftpd does this?]
-		self.client_addr = (ip, port)
-		self.respond ('200 PORT command successful.')
-
-	def new_passive_acceptor (self):
-		# ensure that only one of these exists at a time.
-		if self.passive_acceptor is not None:
-			self.passive_acceptor.close()
-			self.passive_acceptor = None
-		self.passive_acceptor = passive_acceptor (self)
-		return self.passive_acceptor
-
-	def cmd_pasv (self, line):
-		'prepare for server-to-server transfer'
-		pc = self.new_passive_acceptor()
-		port = pc.addr[1]
-		ip_addr = pc.control_channel.getsockname()[0]
-		self.respond (
-			'227 Entering Passive Mode (%s,%d,%d)' % (
-				string.join (string.split (ip_addr, '.'), ','),
-				port/256,
-				port%256
-				)
-			)
-		self.client_dc = None
-
-	def cmd_nlst (self, line):
-		'give name list of files in directory'
-		# ncftp adds the -FC argument for the user-visible 'nlist'
-		# command.  We could try to emulate ls flags, but not just yet.
-		if '-FC' in line:
-			line.remove ('-FC')
-		try:
-			dir_list_producer = self.get_dir_list (line, 0)
-		except os.error, why:
-			self.respond ('550 Could not list directory: %s' % repr(why))
-			return
-		self.respond (
-			'150 Opening %s mode data connection for file list' % (
-				self.type_map[self.current_mode]
-				)
-			)
-		self.make_xmit_channel()
-		self.client_dc.push_with_producer (dir_list_producer)
-		self.client_dc.close_when_done()
-
-	def cmd_list (self, line):
-		'give list files in a directory'
-		try:
-			dir_list_producer = self.get_dir_list (line, 1)
-		except os.error, why:
-			self.respond ('550 Could not list directory: %s' % repr(why))
-			return
-		self.respond (
-			'150 Opening %s mode data connection for file list' % (
-				self.type_map[self.current_mode]
-				)
-			)
-		self.make_xmit_channel()
-		self.client_dc.push_with_producer (dir_list_producer)
-		self.client_dc.close_when_done()
-
-	def cmd_cwd (self, line):
-		'change working directory'
-		if self.cwd (line):
-			self.respond ('250 CWD command successful.')
-		else:
-			self.respond ('550 No such directory.')			
-
-	def cmd_cdup (self, line):
-		'change to parent of current working directory'
-		if self.cdup(line):
-			self.respond ('250 CDUP command successful.')
-		else:
-			self.respond ('550 No such directory.')
-		
-	def cmd_pwd (self, line):
-		'print the current working directory'
-		self.respond (
-			'257 "%s" is the current directory.' % (
-				self.filesystem.current_directory()
-				)
-			)
-
-	# modification time
-	# example output:
-	# 213 19960301204320
-	def cmd_mdtm (self, line):
-		'show last modification time of file'
-		filename = line[1]
-		if not self.filesystem.isfile (filename):
-			self.respond ('550 "%s" is not a file' % filename)
-		else:
-			mtime = time.gmtime(self.filesystem.stat(filename)[stat.ST_MTIME])
-			self.respond (
-				'213 %4d%02d%02d%02d%02d%02d' % (
-					mtime[0],
-					mtime[1],
-					mtime[2],
-					mtime[3],
-					mtime[4],
-					mtime[5]
-					)
-				)
-
-	def cmd_noop (self, line):
-		'do nothing'
-		self.respond ('200 NOOP command successful.')
-
-	def cmd_size (self, line):
-		'return size of file'
-		filename = line[1]
-		if not self.filesystem.isfile (filename):
-			self.respond ('550 "%s" is not a file' % filename)
-		else:
-			self.respond (
-				'213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
-				)
-
-	def cmd_retr (self, line):
-		'retrieve a file'
-		if len(line) < 2:
-			self.command_not_understood (string.join (line))
-		else:
-			file = line[1]
-			if not self.filesystem.isfile (file):
-				self.log_info ('checking %s' % file)
-				self.respond ('550 No such file')
-			else:
-				try:
-					# FIXME: for some reason, 'rt' isn't working on win95
-					mode = 'r'+self.type_mode_map[self.current_mode]
-					fd = self.open (file, mode)
-				except IOError, why:
-					self.respond ('553 could not open file for reading: %s' % (repr(why)))
-					return
-				self.respond (
-					"150 Opening %s mode data connection for file '%s'" % (
-						self.type_map[self.current_mode],
-						file
-						)
-					)
-				self.make_xmit_channel()
-
-				if self.restart_position:
-					# try to position the file as requested, but
-					# give up silently on failure (the 'file object'
-					# may not support seek())
-					try:
-						fd.seek (self.restart_position)
-					except:
-						pass
-					self.restart_position = 0
-
-				self.client_dc.push_with_producer (
-					file_producer (self, self.client_dc, fd)
-					)
-				self.client_dc.close_when_done()
-
-	def cmd_stor (self, line, mode='wb'):
-		'store a file'
-		if len (line) < 2:
-			self.command_not_understood (string.join (line))
-		else:
-			if self.restart_position:
-				restart_position = 0
-				self.respond ('553 restart on STOR not yet supported')
-				return
-			file = line[1]
-			# todo: handle that type flag
-			try:
-				fd = self.open (file, mode)
-			except IOError, why:
-				self.respond ('553 could not open file for writing: %s' % (repr(why)))
-				return
-			self.respond (
-				'150 Opening %s connection for %s' % (
-					self.type_map[self.current_mode],
-					file
-					)
-				)
-			self.make_recv_channel (fd)
-
-	def cmd_abor (self, line):
-		'abort operation'
-		if self.client_dc:
-			self.client_dc.close()
-		self.respond ('226 ABOR command successful.')
-
-	def cmd_appe (self, line):
-		'append to a file'
-		return self.cmd_stor (line, 'ab')
-
-	def cmd_dele (self, line):
-		if len (line) != 2:
-			self.command_not_understood (string.join (line))
-		else:
-			file = line[1]
-			if self.filesystem.isfile (file):
-				try:
-					self.filesystem.unlink (file)
-					self.respond ('250 DELE command successful.')
-				except:
-					self.respond ('550 error deleting file.')
-			else:
-				self.respond ('550 %s: No such file.' % file)
-
-	def cmd_mkd (self, line):
-		if len (line) != 2:
-			self.command.not_understood (string.join (line))
-		else:
-			path = line[1]
-			try:
-				self.filesystem.mkdir (path)
-				self.respond ('257 MKD command successful.')
-			except:
-				self.respond ('550 error creating directory.')
-
-	def cmd_rmd (self, line):
-		if len (line) != 2:
-			self.command.not_understood (string.join (line))
-		else:
-			path = line[1]
-			try:
-				self.filesystem.rmdir (path)
-				self.respond ('250 RMD command successful.')
-			except:
-				self.respond ('550 error removing directory.')
-
-	def cmd_user (self, line):
-		'specify user name'
-		if len(line) > 1:
-			self.user = line[1]
-			self.respond ('331 Password required.')
-		else:
-			self.command_not_understood (string.join (line))
-
-	def cmd_pass (self, line):
-		'specify password'
-		if len(line) < 2:
-			pw = ''
-		else:
-			pw = line[1]
-		result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
-		if result:
-			self.respond ('230 %s' % message)
-			self.filesystem = fs
-			self.authorized = 1
-			self.log_info('Successful login: Filesystem=%s' % repr(fs))
-		else:
-			self.respond ('530 %s' % message)
-
-	def cmd_rest (self, line):
-		'restart incomplete transfer'
-		try:
-			pos = string.atoi (line[1])
-		except ValueError:
-			self.command_not_understood (string.join (line))
-		self.restart_position = pos
-		self.respond (
-			'350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
-			)
-
-	def cmd_stru (self, line):
-		'obsolete - set file transfer structure'
-		if line[1] in 'fF':
-			# f == 'file'
-			self.respond ('200 STRU F Ok')
-		else:
-			self.respond ('504 Unimplemented STRU type')
-
-	def cmd_mode (self, line):
-		'obsolete - set file transfer mode'
-		if line[1] in 'sS':
-			# f == 'file'
-			self.respond ('200 MODE S Ok')
-		else:
-			self.respond ('502 Unimplemented MODE type')
-
-# The stat command has two personalities.  Normally it returns status
-# information about the current connection.  But if given an argument,
-# it is equivalent to the LIST command, with the data sent over the
-# control connection.  Strange.  But wuftpd, ftpd, and nt's ftp server
-# all support it.
-#
-##	def cmd_stat (self, line):
-##		'return status of server'
-##		pass
-
-	def cmd_syst (self, line):
-		'show operating system type of server system'
-		# Replying to this command is of questionable utility, because
-		# this server does not behave in a predictable way w.r.t. the
-		# output of the LIST command.  We emulate Unix ls output, but
-		# on win32 the pathname can contain drive information at the front
-		# Currently, the combination of ensuring that os.sep == '/'
-		# and removing the leading slash when necessary seems to work.
-		# [cd'ing to another drive also works]
-		#
-		# This is how wuftpd responds, and is probably
-		# the most expected.  The main purpose of this reply is so that
-		# the client knows to expect Unix ls-style LIST output.
-		self.respond ('215 UNIX Type: L8')
-		# one disadvantage to this is that some client programs
-		# assume they can pass args to /bin/ls.
-		# a few typical responses:
-		# 215 UNIX Type: L8 (wuftpd)
-		# 215 Windows_NT version 3.51
-		# 215 VMS MultiNet V3.3
-		# 500 'SYST': command not understood. (SVR4)
-
-	def cmd_help (self, line):
-		'give help information'
-		# find all the methods that match 'cmd_xxxx',
-		# use their docstrings for the help response.
-		attrs = dir(self.__class__)
-		help_lines = []
-		for attr in attrs:
-			if attr[:4] == 'cmd_':
-				x = getattr (self, attr)
-				if type(x) == type(self.cmd_help):
-					if x.__doc__:
-						help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
-		if help_lines:
-			self.push ('214-The following commands are recognized\r\n')
-			self.push_with_producer (producers.lines_producer (help_lines))
-			self.push ('214\r\n')
-		else:
-			self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
-
+    # defaults for a reliable __repr__
+    addr = ('unknown','0')
+    
+    # unset this in a derived class in order
+    # to enable the commands in 'self.write_commands'
+    read_only = 1
+    write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']
+    
+    restart_position = 0
+    
+    # comply with (possibly troublesome) RFC959 requirements
+    # This is necessary to correctly run an active data connection
+    # through a firewall that triggers on the source port (expected
+    # to be 'L-1', or 20 in the normal case).
+    bind_local_minus_one = 0
+    
+    def __init__ (self, server, conn, addr):
+        self.server = server
+        self.current_mode = 'a'
+        self.addr = addr
+        asynchat.async_chat.__init__ (self, conn)
+        self.set_terminator ('\r\n')
+        
+        # client data port.  Defaults to 'the same as the control connection'.
+        self.client_addr = (addr[0], 21)
+        
+        self.client_dc = None
+        self.in_buffer = ''
+        self.closing = 0
+        self.passive_acceptor = None
+        self.passive_connection = None
+        self.filesystem = None
+        self.authorized = 0
+        # send the greeting
+        self.respond (
+                '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
+                        self.server.hostname,
+                        VERSION
+                        )
+                )
+        
+# def __del__ (self):
+#     print 'ftp_channel.__del__()'
+
+    # --------------------------------------------------
+    # async-library methods
+    # --------------------------------------------------
+        
+    def handle_expt (self):
+        # this is handled below.  not sure what I could
+        # do here to make that code less kludgish.
+        pass
+        
+    def collect_incoming_data (self, data):
+        self.in_buffer = self.in_buffer + data
+        if len(self.in_buffer) > 4096:
+            # silently truncate really long lines
+            # (possible denial-of-service attack)
+            self.in_buffer = ''
+            
+    def found_terminator (self):
+    
+        line = self.in_buffer
+        
+        if not len(line):
+            return
+            
+        sp = string.find (line, ' ')
+        if sp != -1:
+            line = [line[:sp], line[sp+1:]]
+        else:
+            line = [line]
+            
+        command = string.lower (line[0])
+        # watch especially for 'urgent' abort commands.
+        if string.find (command, 'abor') != -1:
+            # strip off telnet sync chars and the like...
+            while command and command[0] not in string.letters:
+                command = command[1:]
+        fun_name = 'cmd_%s' % command
+        if command != 'pass':
+            self.log ('<== %s' % repr(self.in_buffer)[1:-1])
+        else:
+            self.log ('<== %s' % line[0]+' <password>')
+        self.in_buffer = ''
+        if not hasattr (self, fun_name):
+            self.command_not_understood (line[0])
+            return
+        fun = getattr (self, fun_name)
+        if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
+            self.respond ('530 Please log in with USER and PASS')
+        elif (not self.check_command_authorization (command)):
+            self.command_not_authorized (command)
+        else:
+            try:
+                result = apply (fun, (line,))
+            except:
+                self.server.total_exceptions.increment()
+                (file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
+                if self.client_dc:
+                    try:
+                        self.client_dc.close()
+                    except:
+                        pass
+                self.respond (
+                        '451 Server Error: %s, %s: file: %s line: %s' % (
+                                t,v,file,line,
+                                )
+                        )
+                
+    closed = 0
+    def close (self):
+        if not self.closed:
+            self.closed = 1
+            if self.passive_acceptor:
+                self.passive_acceptor.close()
+            if self.client_dc:
+                self.client_dc.close()
+            self.server.closed_sessions.increment()
+            asynchat.async_chat.close (self)
+            
+    # --------------------------------------------------
+    # filesystem interface functions.
+    # override these to provide access control or perform
+    # other functions.
+    # --------------------------------------------------
+
+    def cwd (self, line):
+        return self.filesystem.cwd (line[1])
+        
+    def cdup (self, line):
+        return self.filesystem.cdup()
+        
+    def open (self, path, mode):
+        return self.filesystem.open (path, mode)
+        
+    # returns a producer
+    def listdir (self, path, long=0):
+        return self.filesystem.listdir (path, long)
+        
+    def get_dir_list (self, line, long=0):
+        # we need to scan the command line for arguments to '/bin/ls'...
+        args = line[1:]
+        path_args = []
+        for arg in args:
+            if arg[0] != '-':
+                path_args.append (arg)
+            else:
+                # ignore arguments
+                pass
+        if len(path_args) < 1:
+            dir = '.'
+        else:
+            dir = path_args[0]
+        return self.listdir (dir, long)
+        
+    # --------------------------------------------------
+    # authorization methods
+    # --------------------------------------------------
+        
+    def check_command_authorization (self, command):
+        if command in self.write_commands and self.read_only:
+            return 0
+        else:
+            return 1
+            
+    # --------------------------------------------------
+    # utility methods
+    # --------------------------------------------------
+            
+    def log (self, message):
+        self.server.logger.log (
+                self.addr[0],
+                '%d %s' % (
+                        self.addr[1], message
+                        )
+                )
+        
+    def respond (self, resp):
+        self.log ('==> %s' % resp)
+        self.push (resp + '\r\n')
+        
+    def command_not_understood (self, command):
+        self.respond ("500 '%s': command not understood." % command)
+        
+    def command_not_authorized (self, command):
+        self.respond (
+                "530 You are not authorized to perform the '%s' command" % (
+                        command
+                        )
+                )
+        
+    def make_xmit_channel (self):
+        # In PASV mode, the connection may or may _not_ have been made
+        # yet.  [although in most cases it is... FTP Explorer being
+        # the only exception I've yet seen].  This gets somewhat confusing
+        # because things may happen in any order...
+        pa = self.passive_acceptor
+        if pa:
+            if pa.ready:
+                # a connection has already been made.
+                conn, addr = self.passive_acceptor.ready
+                cdc = xmit_channel (self, addr)
+                cdc.set_socket (conn)
+                cdc.connected = 1
+                self.passive_acceptor.close()
+                self.passive_acceptor = None
+            else:
+                # we're still waiting for a connect to the PASV port.
+                cdc = xmit_channel (self)
+        else:
+            # not in PASV mode.
+            ip, port = self.client_addr
+            cdc = xmit_channel (self, self.client_addr)
+            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+            if self.bind_local_minus_one:
+                cdc.bind (('', self.server.port - 1))
+            try:
+                cdc.connect ((ip, port))
+            except socket.error, why:
+                self.respond ("425 Can't build data connection")
+        self.client_dc = cdc
+        
+    # pretty much the same as xmit, but only right on the verge of
+    # being worth a merge.
+    def make_recv_channel (self, fd):
+        pa = self.passive_acceptor
+        if pa:
+            if pa.ready:
+                # a connection has already been made.
+                conn, addr = pa.ready
+                cdc = recv_channel (self, addr, fd)
+                cdc.set_socket (conn)
+                cdc.connected = 1
+                self.passive_acceptor.close()
+                self.passive_acceptor = None
+            else:
+                # we're still waiting for a connect to the PASV port.
+                cdc = recv_channel (self, None, fd)
+        else:
+            # not in PASV mode.
+            ip, port = self.client_addr
+            cdc = recv_channel (self, self.client_addr, fd)
+            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+            try:
+                cdc.connect ((ip, port))
+            except socket.error, why:
+                self.respond ("425 Can't build data connection")
+        self.client_dc = cdc
+        
+    type_map = {
+            'a':'ASCII',
+            'i':'Binary',
+            'e':'EBCDIC',
+            'l':'Binary'
+            }
+    
+    type_mode_map = {
+            'a':'t',
+            'i':'b',
+            'e':'b',
+            'l':'b'
+            }
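+    
+    # for example, after a client sends 'TYPE I', current_mode is 'i',
+    # so RETR opens files with 'r' + type_mode_map['i'] == 'rb' and the
+    # 150 reply names type_map['i'] == 'Binary'.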
+    
+    # --------------------------------------------------
+    # command methods
+    # --------------------------------------------------
+    
+    def cmd_type (self, line):
+        'specify data transfer type'
+        # ascii, ebcdic, image, local <byte size>
+        t = string.lower (line[1])
+        # no support for EBCDIC
+        # if t not in ['a','e','i','l']:
+        if t not in ['a','i','l']:
+            self.command_not_understood (string.join (line))
+        elif t == 'l' and (len(line) > 2 and line[2] != '8'):
+            self.respond ('504 Byte size must be 8')
+        else:
+            self.current_mode = t
+            self.respond ('200 Type set to %s.' % self.type_map[t])
+            
+            
+    def cmd_quit (self, line):
+        'terminate session'
+        self.respond ('221 Goodbye.')
+        self.close_when_done()
+        
+    def cmd_port (self, line):
+        'specify data connection port'
+        info = string.split (line[1], ',')
+        ip = string.join (info[:4], '.')
+        port = string.atoi(info[4])*256 + string.atoi(info[5])
+        # how many data connections at a time?
+        # I'm assuming one for now...
+        # TODO: we should (optionally) verify that the
+        # ip number belongs to the client.  [wu-ftpd does this?]
+        self.client_addr = (ip, port)
+        self.respond ('200 PORT command successful.')
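+        # worked example: 'PORT 192,168,0,1,4,1' parses to
+        # ip = '192.168.0.1' and port = 4*256 + 1 = 1025.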
+        
+    def new_passive_acceptor (self):
+        # ensure that only one of these exists at a time.
+        if self.passive_acceptor is not None:
+            self.passive_acceptor.close()
+            self.passive_acceptor = None
+        self.passive_acceptor = passive_acceptor (self)
+        return self.passive_acceptor
+        
+    def cmd_pasv (self, line):
+        'prepare for server-to-server transfer'
+        pc = self.new_passive_acceptor()
+        port = pc.addr[1]
+        ip_addr = pc.control_channel.getsockname()[0]
+        self.respond (
+                '227 Entering Passive Mode (%s,%d,%d)' % (
+                        string.join (string.split (ip_addr, '.'), ','),
+                        port/256,
+                        port%256
+                        )
+                )
+        self.client_dc = None
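+        # worked example: an acceptor bound to 10.0.0.5:1025 produces
+        # '227 Entering Passive Mode (10,0,0,5,4,1)', since 1025/256 == 4
+        # and 1025%256 == 1.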
+        
+    def cmd_nlst (self, line):
+        'give name list of files in directory'
+        # ncftp adds the -FC argument for the user-visible 'nlist'
+        # command.  We could try to emulate ls flags, but not just yet.
+        if '-FC' in line:
+            line.remove ('-FC')
+        try:
+            dir_list_producer = self.get_dir_list (line, 0)
+        except os.error, why:
+            self.respond ('550 Could not list directory: %s' % repr(why))
+            return
+        self.respond (
+                '150 Opening %s mode data connection for file list' % (
+                        self.type_map[self.current_mode]
+                        )
+                )
+        self.make_xmit_channel()
+        self.client_dc.push_with_producer (dir_list_producer)
+        self.client_dc.close_when_done()
+        
+    def cmd_list (self, line):
+        'give list files in a directory'
+        try:
+            dir_list_producer = self.get_dir_list (line, 1)
+        except os.error, why:
+            self.respond ('550 Could not list directory: %s' % repr(why))
+            return
+        self.respond (
+                '150 Opening %s mode data connection for file list' % (
+                        self.type_map[self.current_mode]
+                        )
+                )
+        self.make_xmit_channel()
+        self.client_dc.push_with_producer (dir_list_producer)
+        self.client_dc.close_when_done()
+        
+    def cmd_cwd (self, line):
+        'change working directory'
+        if self.cwd (line):
+            self.respond ('250 CWD command successful.')
+        else:
+            self.respond ('550 No such directory.')
+            
+    def cmd_cdup (self, line):
+        'change to parent of current working directory'
+        if self.cdup(line):
+            self.respond ('250 CDUP command successful.')
+        else:
+            self.respond ('550 No such directory.')
+            
+    def cmd_pwd (self, line):
+        'print the current working directory'
+        self.respond (
+                '257 "%s" is the current directory.' % (
+                        self.filesystem.current_directory()
+                        )
+                )
+        
+    # modification time
+    # example output:
+    # 213 19960301204320
+    def cmd_mdtm (self, line):
+        'show last modification time of file'
+        filename = line[1]
+        if not self.filesystem.isfile (filename):
+            self.respond ('550 "%s" is not a file' % filename)
+        else:
+            mtime = time.gmtime(self.filesystem.stat(filename)[stat.ST_MTIME])
+            self.respond (
+                    '213 %4d%02d%02d%02d%02d%02d' % (
+                            mtime[0],
+                            mtime[1],
+                            mtime[2],
+                            mtime[3],
+                            mtime[4],
+                            mtime[5]
+                            )
+                    )
+            
+    def cmd_noop (self, line):
+        'do nothing'
+        self.respond ('200 NOOP command successful.')
+        
+    def cmd_size (self, line):
+        'return size of file'
+        filename = line[1]
+        if not self.filesystem.isfile (filename):
+            self.respond ('550 "%s" is not a file' % filename)
+        else:
+            self.respond (
+                    '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
+                    )
+            
+    def cmd_retr (self, line):
+        'retrieve a file'
+        if len(line) < 2:
+            self.command_not_understood (string.join (line))
+        else:
+            file = line[1]
+            if not self.filesystem.isfile (file):
+                self.log_info ('checking %s' % file)
+                self.respond ('550 No such file')
+            else:
+                try:
+                    # FIXME: for some reason, 'rt' isn't working on win95
+                    mode = 'r'+self.type_mode_map[self.current_mode]
+                    fd = self.open (file, mode)
+                except IOError, why:
+                    self.respond ('553 could not open file for reading: %s' % (repr(why)))
+                    return
+                self.respond (
+                        "150 Opening %s mode data connection for file '%s'" % (
+                                self.type_map[self.current_mode],
+                                file
+                                )
+                        )
+                self.make_xmit_channel()
+                
+                if self.restart_position:
+                    # try to position the file as requested, but
+                    # give up silently on failure (the 'file object'
+                    # may not support seek())
+                    try:
+                        fd.seek (self.restart_position)
+                    except:
+                        pass
+                    self.restart_position = 0
+                    
+                self.client_dc.push_with_producer (
+                        file_producer (self, self.client_dc, fd)
+                        )
+                self.client_dc.close_when_done()
+                
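+    # worked example: 'REST 1024' (see cmd_rest below) followed by
+    # 'RETR big.bin' resumes the transfer at byte 1024 via the seek()
+    # above.
+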
+    def cmd_stor (self, line, mode='wb'):
+        'store a file'
+        if len (line) < 2:
+            self.command_not_understood (string.join (line))
+        else:
+            if self.restart_position:
+                self.restart_position = 0
+                self.respond ('553 restart on STOR not yet supported')
+                return
+            file = line[1]
+            # todo: handle that type flag
+            try:
+                fd = self.open (file, mode)
+            except IOError, why:
+                self.respond ('553 could not open file for writing: %s' % (repr(why)))
+                return
+            self.respond (
+                    '150 Opening %s connection for %s' % (
+                            self.type_map[self.current_mode],
+                            file
+                            )
+                    )
+            self.make_recv_channel (fd)
+            
+    def cmd_abor (self, line):
+        'abort operation'
+        if self.client_dc:
+            self.client_dc.close()
+        self.respond ('226 ABOR command successful.')
+        
+    def cmd_appe (self, line):
+        'append to a file'
+        return self.cmd_stor (line, 'ab')
+        
+    def cmd_dele (self, line):
+        'delete a file'
+        if len (line) != 2:
+            self.command_not_understood (string.join (line))
+        else:
+            file = line[1]
+            if self.filesystem.isfile (file):
+                try:
+                    self.filesystem.unlink (file)
+                    self.respond ('250 DELE command successful.')
+                except:
+                    self.respond ('550 error deleting file.')
+            else:
+                self.respond ('550 %s: No such file.' % file)
+                
+    def cmd_mkd (self, line):
+        'make a directory'
+        if len (line) != 2:
+            self.command_not_understood (string.join (line))
+        else:
+            path = line[1]
+            try:
+                self.filesystem.mkdir (path)
+                self.respond ('257 MKD command successful.')
+            except:
+                self.respond ('550 error creating directory.')
+                
+    def cmd_rmd (self, line):
+        'remove a directory'
+        if len (line) != 2:
+            self.command_not_understood (string.join (line))
+        else:
+            path = line[1]
+            try:
+                self.filesystem.rmdir (path)
+                self.respond ('250 RMD command successful.')
+            except:
+                self.respond ('550 error removing directory.')
+                
+    def cmd_user (self, line):
+        'specify user name'
+        if len(line) > 1:
+            self.user = line[1]
+            self.respond ('331 Password required.')
+        else:
+            self.command_not_understood (string.join (line))
+            
+    def cmd_pass (self, line):
+        'specify password'
+        if len(line) < 2:
+            pw = ''
+        else:
+            pw = line[1]
+        result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
+        if result:
+            self.respond ('230 %s' % message)
+            self.filesystem = fs
+            self.authorized = 1
+            self.log_info('Successful login: Filesystem=%s' % repr(fs))
+        else:
+            self.respond ('530 %s' % message)
+            
+    def cmd_rest (self, line):
+        'restart incomplete transfer'
+        try:
+            pos = string.atoi (line[1])
+        except ValueError:
+            self.command_not_understood (string.join (line))
+            return
+        self.restart_position = pos
+        self.respond (
+                '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
+                )
+        
+    def cmd_stru (self, line):
+        'obsolete - set file transfer structure'
+        if line[1] in 'fF':
+            # f == 'file'
+            self.respond ('200 STRU F Ok')
+        else:
+            self.respond ('504 Unimplemented STRU type')
+            
+    def cmd_mode (self, line):
+        'obsolete - set file transfer mode'
+        if line[1] in 'sS':
+            # s == 'stream'
+            self.respond ('200 MODE S Ok')
+        else:
+            self.respond ('502 Unimplemented MODE type')
+            
+    # The stat command has two personalities.  Normally it returns status
+    # information about the current connection.  But if given an argument,
+    # it is equivalent to the LIST command, with the data sent over the
+    # control connection.  Strange.  But wuftpd, ftpd, and nt's ftp server
+    # all support it.
+    #
+    ## def cmd_stat (self, line):
+    ##     'return status of server'
+    ##     pass
+
+    def cmd_syst (self, line):
+        'show operating system type of server system'
+        # Replying to this command is of questionable utility, because
+        # this server does not behave in a predictable way w.r.t. the
+        # output of the LIST command.  We emulate Unix ls output, but
+        # on win32 the pathname can contain drive information at the front
+        # Currently, the combination of ensuring that os.sep == '/'
+        # and removing the leading slash when necessary seems to work.
+        # [cd'ing to another drive also works]
+        #
+        # This is how wuftpd responds, and is probably
+        # the most expected.  The main purpose of this reply is so that
+        # the client knows to expect Unix ls-style LIST output.
+        self.respond ('215 UNIX Type: L8')
+        # one disadvantage to this is that some client programs
+        # assume they can pass args to /bin/ls.
+        # a few typical responses:
+        # 215 UNIX Type: L8 (wuftpd)
+        # 215 Windows_NT version 3.51
+        # 215 VMS MultiNet V3.3
+        # 500 'SYST': command not understood. (SVR4)
+        
+    def cmd_help (self, line):
+        'give help information'
+        # find all the methods that match 'cmd_xxxx',
+        # use their docstrings for the help response.
+        attrs = dir(self.__class__)
+        help_lines = []
+        for attr in attrs:
+            if attr[:4] == 'cmd_':
+                x = getattr (self, attr)
+                if type(x) == type(self.cmd_help):
+                    if x.__doc__:
+                        help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
+        if help_lines:
+            self.push ('214-The following commands are recognized\r\n')
+            self.push_with_producer (producers.lines_producer (help_lines))
+            self.push ('214\r\n')
+        else:
+            self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
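+    # a sample reply sketch (the real list depends on the docstrings
+    # defined above; entries are tab-separated):
+    #   214-The following commands are recognized
+    #           abor    abort operation
+    #           type    specify data transfer type
+    #           ...
+    #   214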
+            
 class ftp_server (asyncore.dispatcher):
-	# override this to spawn a different FTP channel class.
-	ftp_channel_class = ftp_channel
-
-	SERVER_IDENT = 'FTP Server (V%s)' % VERSION
-
-	def __init__ (
-		self,
-		authorizer,
-		hostname	=None,
-		ip			='',
-		port		=21,
-		resolver	=None,
-		logger_object=logger.file_logger (sys.stdout)
-		):
-		self.ip = ip
-		self.port = port
-		self.authorizer = authorizer
-
-		if hostname is None:
-			self.hostname = socket.gethostname()
-		else:
-			self.hostname = hostname
-
-		# statistics
-		self.total_sessions = counter()
-		self.closed_sessions = counter()
-		self.total_files_out = counter()
-		self.total_files_in = counter()
-		self.total_bytes_out = counter()
-		self.total_bytes_in = counter()
-		self.total_exceptions = counter()
-		#
-		asyncore.dispatcher.__init__ (self)
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-
-		self.set_reuse_addr()
-		self.bind ((self.ip, self.port))
-		self.listen (5)
-
-		if not logger_object:
-			logger_object = sys.stdout
-
-		if resolver:
-			self.logger = logger.resolving_logger (resolver, logger_object)
-		else:
-			self.logger = logger.unresolving_logger (logger_object)
-
-		self.log_info('FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
-			time.ctime(time.time()),
-			repr (self.authorizer),
-			self.hostname,
-			self.port)
-			)
-
-	def writable (self):
-		return 0
-
-	def handle_read (self):
-		pass
-
-	def handle_connect (self):
-		pass
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		self.total_sessions.increment()
-		self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))
-		self.ftp_channel_class (self, conn, addr)
-
-	# return a producer describing the state of the server
-	def status (self):
-
-		def nice_bytes (n):
-			return string.join (status_handler.english_bytes (n))
-
-		return producers.lines_producer (
-			['<h2>%s</h2>'				% self.SERVER_IDENT,
-			 '<br>Listening on <b>Host:</b> %s' % self.hostname,
-			 '<b>Port:</b> %d'			% self.port,
-			 '<br>Sessions',
-			 '<b>Total:</b> %s'			% self.total_sessions,
-			 '<b>Current:</b> %d'		% (self.total_sessions.as_long() - self.closed_sessions.as_long()),
-			 '<br>Files',
-			 '<b>Sent:</b> %s'			% self.total_files_out,
-			 '<b>Received:</b> %s'		% self.total_files_in,
-			 '<br>Bytes',
-			 '<b>Sent:</b> %s'			% nice_bytes (self.total_bytes_out.as_long()),
-			 '<b>Received:</b> %s'		% nice_bytes (self.total_bytes_in.as_long()),
-			 '<br>Exceptions: %s'		% self.total_exceptions,
-			 ]
-			)
-
-# ======================================================================
-#						 Data Channel Classes
-# ======================================================================
-
-# This socket accepts a data connection, used when the server has been
-# placed in passive mode.  Although the RFC implies that we ought to
-# be able to use the same acceptor over and over again, this presents
-# a problem: how do we shut it off, so that we are accepting
-# connections only when we expect them?  [we can't]
-#
-# wuftpd, and probably all the other servers, solve this by allowing
-# only one connection to hit this acceptor.  They then close it.  Any
-# subsequent data-connection command will then try for the default
-# port on the client side [which is of course never there].  So the
-# 'always-send-PORT/PASV' behavior seems required.
-#
-# Another note: wuftpd will also be listening on the channel as soon
-# as the PASV command is sent.  It does not wait for a data command
-# first.
-
-# --- we need to queue up a particular behavior:
-#  1) xmit : queue up producer[s]
-#  2) recv : the file object
-#
-# It would be nice if we could make both channels the same.  Hmmm..
-#
-
+    # override this to spawn a different FTP channel class.
+    ftp_channel_class = ftp_channel
+    
+    SERVER_IDENT = 'FTP Server (V%s)' % VERSION
+    
+    def __init__ (
+            self,
+            authorizer,
+            hostname     =None,
+            ip           ='',
+            port         =21,
+            resolver     =None,
+            logger_object=logger.file_logger (sys.stdout)
+            ):
+        self.ip = ip
+        self.port = port
+        self.authorizer = authorizer
+        
+        if hostname is None:
+            self.hostname = socket.gethostname()
+        else:
+            self.hostname = hostname
+
+        # statistics
+        self.total_sessions = counter()
+        self.closed_sessions = counter()
+        self.total_files_out = counter()
+        self.total_files_in = counter()
+        self.total_bytes_out = counter()
+        self.total_bytes_in = counter()
+        self.total_exceptions = counter()
+        #
+        asyncore.dispatcher.__init__ (self)
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        
+        self.set_reuse_addr()
+        self.bind ((self.ip, self.port))
+        self.listen (5)
+        
+        if not logger_object:
+            logger_object = sys.stdout
+            
+        if resolver:
+            self.logger = logger.resolving_logger (resolver, logger_object)
+        else:
+            self.logger = logger.unresolving_logger (logger_object)
+            
+        self.log_info('FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
+                time.ctime(time.time()),
+                repr (self.authorizer),
+                self.hostname,
+                self.port)
+                )
+        
+    def writable (self):
+        return 0
+        
+    def handle_read (self):
+        pass
+        
+    def handle_connect (self):
+        pass
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        self.total_sessions.increment()
+        self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))
+        self.ftp_channel_class (self, conn, addr)
+        
+    # return a producer describing the state of the server
+    def status (self):
+    
+        def nice_bytes (n):
+            return string.join (status_handler.english_bytes (n))
+            
+        return producers.lines_producer (
+                ['<h2>%s</h2>'              % self.SERVER_IDENT,
+                 '<br>Listening on <b>Host:</b> %s' % self.hostname,
+                 '<b>Port:</b> %d'          % self.port,
+                 '<br>Sessions',
+                 '<b>Total:</b> %s'         % self.total_sessions,
+                 '<b>Current:</b> %d'       % (self.total_sessions.as_long() - self.closed_sessions.as_long()),
+                 '<br>Files',
+                 '<b>Sent:</b> %s'          % self.total_files_out,
+                 '<b>Received:</b> %s'      % self.total_files_in,
+                 '<br>Bytes',
+                 '<b>Sent:</b> %s'          % nice_bytes (self.total_bytes_out.as_long()),
+                 '<b>Received:</b> %s'      % nice_bytes (self.total_bytes_in.as_long()),
+                 '<br>Exceptions: %s'       % self.total_exceptions,
+                 ]
+                )
+
+# ======================================================================
+#                         Data Channel Classes
+# ======================================================================
+
+# This socket accepts a data connection, used when the server has been
+# placed in passive mode.  Although the RFC implies that we ought to
+# be able to use the same acceptor over and over again, this presents
+# a problem: how do we shut it off, so that we are accepting
+# connections only when we expect them?  [we can't]
+#
+# wuftpd, and probably all the other servers, solve this by allowing
+# only one connection to hit this acceptor.  They then close it.  Any
+# subsequent data-connection command will then try for the default
+# port on the client side [which is of course never there].  So the
+# 'always-send-PORT/PASV' behavior seems required.
+#
+# Another note: wuftpd will also be listening on the channel as soon
+# as the PASV command is sent.  It does not wait for a data command
+# first.
+
+# --- we need to queue up a particular behavior:
+#  1) xmit : queue up producer[s]
+#  2) recv : the file object
+#
+# It would be nice if we could make both channels the same.  Hmmm..
+#
+        
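+# A client-side sketch of the consequence (only Python's standard
+# ftplib is assumed; the host and port here are hypothetical):
+#
+#   from ftplib import FTP
+#   ftp = FTP ()
+#   ftp.connect ('localhost', 8021)
+#   ftp.login ('anonymous', '')
+#   ftp.set_pasv (1)          # force PASV-style data connections
+#   ftp.retrlines ('LIST')    # PASV round-trip, fresh data connection
+#   ftp.retrlines ('NLST')    # another PASV; the old acceptor is gone
+#   ftp.quit ()
+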
 class passive_acceptor (asyncore.dispatcher):
-	ready = None
-
-	def __init__ (self, control_channel):
-		# connect_fun (conn, addr)
-		asyncore.dispatcher.__init__ (self)
-		self.control_channel = control_channel
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-		# bind to an address on the interface that the
-		# control connection is coming from.
-		self.bind ((
-			self.control_channel.getsockname()[0],
-			0
-			))
-		self.addr = self.getsockname()
-		self.listen (1)
-
-#	def __del__ (self):
-#		print 'passive_acceptor.__del__()'
-
-	def log (self, *ignore):
-		pass
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		dc = self.control_channel.client_dc
-		if dc is not None:
-			dc.set_socket (conn)
-			dc.addr = addr
-			dc.connected = 1
-			self.control_channel.passive_acceptor = None
-		else:
-			self.ready = conn, addr
-		self.close()
-
-
+    ready = None
+    
+    def __init__ (self, control_channel):
+        # connect_fun (conn, addr)
+        asyncore.dispatcher.__init__ (self)
+        self.control_channel = control_channel
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        # bind to an address on the interface that the
+        # control connection is coming from.
+        self.bind ((
+                self.control_channel.getsockname()[0],
+                0
+                ))
+        self.addr = self.getsockname()
+        self.listen (1)
+        
+    # def __del__ (self):
+    #     print 'passive_acceptor.__del__()'
+        
+    def log (self, *ignore):
+        pass
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        dc = self.control_channel.client_dc
+        if dc is not None:
+            dc.set_socket (conn)
+            dc.addr = addr
+            dc.connected = 1
+            self.control_channel.passive_acceptor = None
+        else:
+            self.ready = conn, addr
+        self.close()
+        
+        
 class xmit_channel (asynchat.async_chat):
 
-	# for an ethernet, you want this to be fairly large, in fact, it
-	# _must_ be large for performance comparable to an ftpd.  [64k] we
-	# ought to investigate automatically-sized buffers...
-
-	ac_out_buffer_size = 16384
-	bytes_out = 0
-
-	def __init__ (self, channel, client_addr=None):
-		self.channel = channel
-		self.client_addr = client_addr
-		asynchat.async_chat.__init__ (self)
-		
-#	def __del__ (self):
-#		print 'xmit_channel.__del__()'
-
-	def log (*args):
-		pass
-
-	def readable (self):
-		return not self.connected
-
-	def writable (self):
-		return 1
-
-	def send (self, data):
-		result = asynchat.async_chat.send (self, data)
-		self.bytes_out = self.bytes_out + result
-		return result
-
-	def handle_error (self):
-		# usually this is to catch an unexpected disconnect.
-		self.log_info ('unexpected disconnect on data xmit channel', 'error')
-		try:
-			self.close()
-		except:
-			pass
-
-	# TODO: there's a better way to do this.  we need to be able to
-	# put 'events' in the producer fifo.  to do this cleanly we need
-	# to reposition the 'producer' fifo as an 'event' fifo.
-
-	def close (self):
-		c = self.channel
-		s = c.server
-		c.client_dc = None
-		s.total_files_out.increment()
-		s.total_bytes_out.increment (self.bytes_out)
-		if not len(self.producer_fifo):
-			c.respond ('226 Transfer complete')
-		elif not c.closed:
-			c.respond ('426 Connection closed; transfer aborted')
-		del c
-		del s
-		del self.channel
-		asynchat.async_chat.close (self)
-
+    # for an ethernet, you want this to be fairly large, in fact, it
+    # _must_ be large for performance comparable to an ftpd.  [64k] we
+    # ought to investigate automatically-sized buffers...
+
+    ac_out_buffer_size = 16384
+    bytes_out = 0
+    
+    def __init__ (self, channel, client_addr=None):
+        self.channel = channel
+        self.client_addr = client_addr
+        asynchat.async_chat.__init__ (self)
+        
+    # def __del__ (self):
+    #     print 'xmit_channel.__del__()'
+        
+    def log (*args):
+        pass
+        
+    def readable (self):
+        return not self.connected
+        
+    def writable (self):
+        return 1
+        
+    def send (self, data):
+        result = asynchat.async_chat.send (self, data)
+        self.bytes_out = self.bytes_out + result
+        return result
+        
+    def handle_error (self):
+        # usually this is to catch an unexpected disconnect.
+        self.log_info ('unexpected disconnect on data xmit channel', 'error')
+        try:
+            self.close()
+        except:
+            pass
+            
+    # TODO: there's a better way to do this.  we need to be able to
+    # put 'events' in the producer fifo.  to do this cleanly we need
+    # to reposition the 'producer' fifo as an 'event' fifo.
+            
+    def close (self):
+        c = self.channel
+        s = c.server
+        c.client_dc = None
+        s.total_files_out.increment()
+        s.total_bytes_out.increment (self.bytes_out)
+        if not len(self.producer_fifo):
+            c.respond ('226 Transfer complete')
+        elif not c.closed:
+            c.respond ('426 Connection closed; transfer aborted')
+        del c
+        del s
+        del self.channel
+        asynchat.async_chat.close (self)
+        
 class recv_channel (asyncore.dispatcher):
-	def __init__ (self, channel, client_addr, fd):
-		self.channel = channel
-		self.client_addr = client_addr
-		self.fd = fd
-		asyncore.dispatcher.__init__ (self)
-		self.bytes_in = counter()
-
-	def log (self, *ignore):
-		pass
-
-	def handle_connect (self):
-		pass
-
-	def writable (self):
-		return 0
-
-	def recv (*args):
-		result = apply (asyncore.dispatcher.recv, args)
-		self = args[0]
-		self.bytes_in.increment(len(result))
-		return result
-
-	buffer_size = 8192
-
-	def handle_read (self):
-		block = self.recv (self.buffer_size)
-		if block:
-			try:
-				self.fd.write (block)
-			except IOError:
-				self.log_info ('got exception writing block...', 'error')
-
-	def handle_close (self):
-		s = self.channel.server
-		s.total_files_in.increment()
-		s.total_bytes_in.increment(self.bytes_in.as_long())
-		self.fd.close()
-		self.channel.respond ('226 Transfer complete.')
-		self.close()
-
+    def __init__ (self, channel, client_addr, fd):
+        self.channel = channel
+        self.client_addr = client_addr
+        self.fd = fd
+        asyncore.dispatcher.__init__ (self)
+        self.bytes_in = counter()
+        
+    def log (self, *ignore):
+        pass
+        
+    def handle_connect (self):
+        pass
+        
+    def writable (self):
+        return 0
+        
+    def recv (*args):
+        result = apply (asyncore.dispatcher.recv, args)
+        self = args[0]
+        self.bytes_in.increment(len(result))
+        return result
+        
+    buffer_size = 8192
+    
+    def handle_read (self):
+        block = self.recv (self.buffer_size)
+        if block:
+            try:
+                self.fd.write (block)
+            except IOError:
+                self.log_info ('got exception writing block...', 'error')
+                
+    def handle_close (self):
+        s = self.channel.server
+        s.total_files_in.increment()
+        s.total_bytes_in.increment(self.bytes_in.as_long())
+        self.fd.close()
+        self.channel.respond ('226 Transfer complete.')
+        self.close()
+        
 import filesys
 
 # not much of a doorman! 8^)
 class dummy_authorizer:
-	def __init__ (self, root='/'):
-		self.root = root
-	def authorize (self, channel, username, password):
-		channel.persona = -1, -1
-		channel.read_only = 1
-		return 1, 'Ok.', filesys.os_filesystem (self.root)
-
+    def __init__ (self, root='/'):
+        self.root = root
+    def authorize (self, channel, username, password):
+        channel.persona = -1, -1
+        channel.read_only = 1
+        return 1, 'Ok.', filesys.os_filesystem (self.root)
+        
 class anon_authorizer:
-	def __init__ (self, root='/'):
-		self.root = root
-		
-	def authorize (self, channel, username, password):
-		if username in ('ftp', 'anonymous'):
-			channel.persona = -1, -1
-			channel.read_only = 1
-			return 1, 'Ok.', filesys.os_filesystem (self.root)
-		else:
-			return 0, 'Password invalid.', None
-
-# ===========================================================================
-# Unix-specific improvements
-# ===========================================================================
-
+    def __init__ (self, root='/'):
+        self.root = root
+        
+    def authorize (self, channel, username, password):
+        if username in ('ftp', 'anonymous'):
+            channel.persona = -1, -1
+            channel.read_only = 1
+            return 1, 'Ok.', filesys.os_filesystem (self.root)
+        else:
+            return 0, 'Password invalid.', None
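+
+# Any object with an authorize (channel, username, password) method
+# returning a (success, message, filesystem) triple can serve as the
+# server's authorizer; cmd_pass consumes exactly that triple.  A
+# hypothetical read-only guest sketch:
+#
+#   class guest_authorizer:
+#       def authorize (self, channel, username, password):
+#           if username == 'guest':
+#               channel.read_only = 1
+#               return 1, 'Guest login ok.', filesys.os_filesystem ('/tmp')
+#           return 0, 'Guests only.', None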
+
+# ===========================================================================
+# Unix-specific improvements
+# ===========================================================================
+
 if os.name == 'posix':
-
-	class unix_authorizer:
-		# return a trio of (success, reply_string, filesystem)
-		def authorize (self, channel, username, password):
-			import crypt
-			import pwd
-			try:
-				info = pwd.getpwnam (username)
-			except KeyError:
-				return 0, 'No such user.', None
-			mangled = info[1]
-			if crypt.crypt (password, mangled[:2]) == mangled:
-				channel.read_only = 0
-				fs = filesys.schizophrenic_unix_filesystem (
-					'/',
-					info[5],
-					persona = (info[2], info[3])
-					)
-				return 1, 'Login successful.', fs
-			else:
-				return 0, 'Password invalid.', None
-
-		def __repr__ (self):
-			return '<standard unix authorizer>'
 
-	# simple anonymous ftp support
-	class unix_authorizer_with_anonymous (unix_authorizer):
-		def __init__ (self, root=None, real_users=0):
-			self.root = root
-			self.real_users = real_users
-
-		def authorize (self, channel, username, password):
-			if string.lower(username) in ['anonymous', 'ftp']:
-				import pwd
-				try:
-					# ok, here we run into lots of confusion.
-					# on some os', anon runs under user 'nobody',
-					# on others as 'ftp'.  ownership is also critical.
-					# need to investigate.
-					# linux: new linuxen seem to have nobody's UID=-1,
-					#    which is an illegal value.  Use ftp.
-					ftp_user_info = pwd.getpwnam ('ftp')
-					if string.lower(os.uname()[0]) == 'linux':
-						nobody_user_info = pwd.getpwnam ('ftp')
-					else:
-						nobody_user_info = pwd.getpwnam ('nobody')
-					channel.read_only = 1
-					if self.root is None:
-						self.root = ftp_user_info[5]
-					fs = filesys.unix_filesystem (self.root, '/')
-					return 1, 'Anonymous Login Successful', fs
-				except KeyError:
-					return 0, 'Anonymous account not set up', None
-			elif self.real_users:
-				return unix_authorizer.authorize (
-					self,
-					channel,
-					username,
-					password
-					)
-			else:
-				return 0, 'User logins not allowed', None
-
+    class unix_authorizer:
+        # return a trio of (success, reply_string, filesystem)
+        def authorize (self, channel, username, password):
+            import crypt
+            import pwd
+            try:
+                info = pwd.getpwnam (username)
+            except KeyError:
+                return 0, 'No such user.', None
+            mangled = info[1]
+            if crypt.crypt (password, mangled[:2]) == mangled:
+                channel.read_only = 0
+                fs = filesys.schizophrenic_unix_filesystem (
+                        '/',
+                        info[5],
+                        persona = (info[2], info[3])
+                        )
+                return 1, 'Login successful.', fs
+            else:
+                return 0, 'Password invalid.', None
+                
+        def __repr__ (self):
+            return '<standard unix authorizer>'
+            
+    # simple anonymous ftp support
+    class unix_authorizer_with_anonymous (unix_authorizer):
+        def __init__ (self, root=None, real_users=0):
+            self.root = root
+            self.real_users = real_users
+            
+        def authorize (self, channel, username, password):
+            if string.lower(username) in ['anonymous', 'ftp']:
+                import pwd
+                try:
+                    # ok, here we run into lots of confusion.
+                    # on some os', anon runs under user 'nobody',
+                    # on others as 'ftp'.  ownership is also critical.
+                    # need to investigate.
+                    # linux: new linuxen seem to have nobody's UID=-1,
+                    #    which is an illegal value.  Use ftp.
+                    ftp_user_info = pwd.getpwnam ('ftp')
+                    if string.lower(os.uname()[0]) == 'linux':
+                        nobody_user_info = pwd.getpwnam ('ftp')
+                    else:
+                        nobody_user_info = pwd.getpwnam ('nobody')
+                    channel.read_only = 1
+                    if self.root is None:
+                        self.root = ftp_user_info[5]
+                    fs = filesys.unix_filesystem (self.root, '/')
+                    return 1, 'Anonymous Login Successful', fs
+                except KeyError:
+                    return 0, 'Anonymous account not set up', None
+            elif self.real_users:
+                return unix_authorizer.authorize (
+                        self,
+                        channel,
+                        username,
+                        password
+                        )
+            else:
+                return 0, 'User logins not allowed', None
+                
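+    # usage sketch: handing unix_authorizer_with_anonymous (real_users=1)
+    # to ftp_server allows real unix logins as well as anonymous ones,
+    # the latter rooted at the 'ftp' account's home directory.
+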
 class file_producer:
-	block_size = 16384
-	def __init__ (self, server, dc, fd):
-		self.fd = fd
-		self.done = 0
-		
-	def more (self):
-		if self.done:
-			return ''
-		else:
-			block = self.fd.read (self.block_size)
-			if not block:
-				self.fd.close()
-				self.done = 1
-			return block
-
-# usage: ftp_server /PATH/TO/FTP/ROOT PORT
-# for example:
-# $ ftp_server /home/users/ftp 8021
-
+    block_size = 16384
+    def __init__ (self, server, dc, fd):
+        self.fd = fd
+        self.done = 0
+        
+    def more (self):
+        if self.done:
+            return ''
+        else:
+            block = self.fd.read (self.block_size)
+            if not block:
+                self.fd.close()
+                self.done = 1
+            return block
+
+# usage: ftp_server /PATH/TO/FTP/ROOT PORT
+# for example:
+# $ ftp_server /home/users/ftp 8021
+
 if os.name == 'posix':
-	def test (port='8021'):
-		import sys
-		fs = ftp_server (
-			unix_authorizer(),
-			port=string.atoi (port)
-			)
-		try:
-			asyncore.loop()
-		except KeyboardInterrupt:
-			self.log_info('FTP server shutting down. (received SIGINT)', 'warning')
-			# close everything down on SIGINT.
-			# of course this should be a cleaner shutdown.
-			asyncore.close_all()
-
-	if __name__ == '__main__':
-		test (sys.argv[1])
-# not unix
+    def test (port='8021'):
+        import sys
+        fs = ftp_server (
+                unix_authorizer(),
+                port=string.atoi (port)
+                )
+        try:
+            asyncore.loop()
+        except KeyboardInterrupt:
+            fs.log_info('FTP server shutting down. (received SIGINT)', 'warning')
+            # close everything down on SIGINT.
+            # of course this should be a cleaner shutdown.
+            asyncore.close_all()
+            
+    if __name__ == '__main__':
+        test (sys.argv[1])
+# not unix
 else:
-	def test ():
-		fs = ftp_server (dummy_authorizer())
-	if __name__ == '__main__':
-		test ()
-
-# this is the command list from the wuftpd man page
-# '*' means we've implemented it.
-# '!' requires write access
-#
+    def test ():
+        fs = ftp_server (dummy_authorizer())
+    if __name__ == '__main__':
+        test ()
+
+# this is the command list from the wuftpd man page
+# '*' means we've implemented it.
+# '!' requires write access
+#
 command_documentation = {
-	'abor':	'abort previous command',							#*
-	'acct':	'specify account (ignored)',
-	'allo':	'allocate storage (vacuously)',
-	'appe':	'append to a file',									#*!
-	'cdup':	'change to parent of current working directory',	#*
-	'cwd':	'change working directory',							#*
-	'dele':	'delete a file',									#!
-	'help':	'give help information',							#*
-	'list':	'give list files in a directory',					#*
-	'mkd':	'make a directory',									#!
-	'mdtm':	'show last modification time of file',				#*
-	'mode':	'specify data transfer mode',
-	'nlst':	'give name list of files in directory',				#*
-	'noop':	'do nothing',										#*
-	'pass':	'specify password',									#*
-	'pasv':	'prepare for server-to-server transfer',			#*
-	'port':	'specify data connection port',						#*
-	'pwd':	'print the current working directory',				#*
-	'quit':	'terminate session',								#*
-	'rest':	'restart incomplete transfer',						#*
-	'retr':	'retrieve a file',									#*
-	'rmd':	'remove a directory',								#!
-	'rnfr':	'specify rename-from file name',					#!
-	'rnto':	'specify rename-to file name',						#!
-	'site':	'non-standard commands (see next section)',
-	'size':	'return size of file',								#*
-	'stat':	'return status of server',							#*
-	'stor':	'store a file',										#*!
-	'stou':	'store a file with a unique name',					#!
-	'stru':	'specify data transfer structure',
-	'syst':	'show operating system type of server system',		#*
-	'type':	'specify data transfer type',						#*
-	'user':	'specify user name',								#*
-	'xcup':	'change to parent of current working directory (deprecated)',
-	'xcwd':	'change working directory (deprecated)',
-	'xmkd':	'make a directory (deprecated)',					#!
-	'xpwd':	'print the current working directory (deprecated)',
-	'xrmd':	'remove a directory (deprecated)',					#!
+        'abor': 'abort previous command',                           #*
+        'acct': 'specify account (ignored)',
+        'allo': 'allocate storage (vacuously)',
+        'appe': 'append to a file',                                 #*!
+        'cdup': 'change to parent of current working directory',    #*
+        'cwd':  'change working directory',                         #*
+        'dele': 'delete a file',                                    #!
+        'help': 'give help information',                            #*
+        'list': 'give list files in a directory',                   #*
+        'mkd':  'make a directory',                                 #!
+        'mdtm': 'show last modification time of file',              #*
+        'mode': 'specify data transfer mode',
+        'nlst': 'give name list of files in directory',             #*
+        'noop': 'do nothing',                                       #*
+        'pass': 'specify password',                                 #*
+        'pasv': 'prepare for server-to-server transfer',            #*
+        'port': 'specify data connection port',                     #*
+        'pwd':  'print the current working directory',              #*
+        'quit': 'terminate session',                                #*
+        'rest': 'restart incomplete transfer',                      #*
+        'retr': 'retrieve a file',                                  #*
+        'rmd':  'remove a directory',                               #!
+        'rnfr': 'specify rename-from file name',                    #!
+        'rnto': 'specify rename-to file name',                      #!
+        'site': 'non-standard commands (see next section)',
+        'size': 'return size of file',                              #*
+        'stat': 'return status of server',                          #*
+        'stor': 'store a file',                                     #*!
+        'stou': 'store a file with a unique name',                  #!
+        'stru': 'specify data transfer structure',
+        'syst': 'show operating system type of server system',      #*
+        'type': 'specify data transfer type',                       #*
+        'user': 'specify user name',                                #*
+        'xcup': 'change to parent of current working directory (deprecated)',
+        'xcwd': 'change working directory (deprecated)',
+        'xmkd': 'make a directory (deprecated)',                    #!
+        'xpwd': 'print the current working directory (deprecated)',
+        'xrmd': 'remove a directory (deprecated)',                  #!
 }
 
 
 # debugging aid (linux)
 def get_vm_size ():
-	return string.atoi (string.split(open ('/proc/self/stat').readline())[22])
-
+    return string.atoi (string.split(open ('/proc/self/stat').readline())[22])
+    
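+# (field 23 of /proc/<pid>/stat -- index 22 after the split -- is
+# 'vsize', the virtual memory size in bytes; print_vm below divides
+# by 1024 to report kilobytes.)
+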
 def print_vm():
-	print 'vm: %8dk' % (get_vm_size()/1024)
+    print 'vm: %8dk' % (get_vm_size()/1024)

--- Updated File http_bobo.py in package Zope2 --
--- http_bobo.py	2001/04/26 00:07:52	1.3
+++ http_bobo.py	2001/05/01 11:44:48	1.4
@@ -7,69 +7,69 @@
 VERSION_STRING = string.split(RCS_ID)[2]
 
 class bobo_extension:
-	hits = 0
-
-	SERVER_IDENT = 'Bobo Extension (V%s)' % VERSION_STRING
-
-	def __init__ (self, regexp):
-		self.regexp = regex.compile (regexp)
-
-	def __repr__ (self):
-		return '<Bobo Extension <b>(%d hits)</b> at %x>' % (
-			self.hits,
-			id (self)
-			)
-
-	def match (self, path_part):
-		if self.regexp.match (path_part) == len(path_part):
-			return 1
-		else:
-			return 0
-
-	def status (self):
-		return mstatus.lines_producer ([
-			'<h2>%s</h2>'  						%self.SERVER_IDENT,
-			'<br><b>Total Hits:</b> %d'			% self.hits,
-			])
-
-	def handle_request (self, channel):
-		self.hits = self.hits + 1
-
-		[path, params, query, fragment] = channel.uri
-
-		if query:
-			# cgi_publisher_module doesn't want the leading '?'
-			query = query[1:]
-
-		env = {}
-		env['REQUEST_METHOD']	= method
-		env['SERVER_PORT']		= channel.server.port
-		env['SERVER_NAME']		= channel.server.server_name
-		env['SCRIPT_NAME']		= module_name
-		env['QUERY_STRING']		= query
-		env['PATH_INFO']		= string.join (path_parts[1:],'/')
-
-		# this should really be done with with a real producer.  just
-		# have to make sure it can handle all of the file object api.
-
-		sin  = StringIO.StringIO('')
-		sout = StringIO.StringIO()
-		serr = StringIO.StringIO()
-
-		cgi_module_publisher.publish_module (
-			module_name,
-			stdin=sin,
-			stdout=sout,
-			stderr=serr,
-			environ=env,
-			debug=1
-			)
-		
-		channel.push (
-			channel.response (200) + \
-			channel.generated_content_header (path)
-			)
-
-		self.push (sout.getvalue())
-		self.push (serr.getvalue())
-		self.close_when_done()
+    hits = 0
+    
+    SERVER_IDENT = 'Bobo Extension (V%s)' % VERSION_STRING
+    
+    def __init__ (self, regexp):
+        self.regexp = regex.compile (regexp)
+        
+    def __repr__ (self):
+        return '<Bobo Extension <b>(%d hits)</b> at %x>' % (
+                self.hits,
+                id (self)
+                )
+        
+    def match (self, path_part):
+        if self.regexp.match (path_part) == len(path_part):
+            return 1
+        else:
+            return 0
+            
+    def status (self):
+        return mstatus.lines_producer ([
+                '<h2>%s</h2>'               % self.SERVER_IDENT,
+                '<br><b>Total Hits:</b> %d' % self.hits,
+                ])
+        
+    def handle_request (self, channel):
+        self.hits = self.hits + 1
+        
+        [path, params, query, fragment] = channel.uri
+        
+        if query:
+            # cgi_publisher_module doesn't want the leading '?'
+            query = query[1:]
+            
+        env = {}
+        env['REQUEST_METHOD']  = method
+        env['SERVER_PORT']     = channel.server.port
+        env['SERVER_NAME']     = channel.server.server_name
+        env['SCRIPT_NAME']     = module_name
+        env['QUERY_STRING']    = query
+        env['PATH_INFO']       = string.join (path_parts[1:],'/')
+        
+        # this should really be done with a real producer.  just
+        # have to make sure it can handle all of the file object api.
+        
+        sin  = StringIO.StringIO('')
+        sout = StringIO.StringIO()
+        serr = StringIO.StringIO()
+        
+        cgi_module_publisher.publish_module (
+                module_name,
+                stdin=sin,
+                stdout=sout,
+                stderr=serr,
+                environ=env,
+                debug=1
+                )
+        
+        channel.push (
+                channel.response (200) + \
+                channel.generated_content_header (path)
+                )
+        
+        channel.push (sout.getvalue())
+        channel.push (serr.getvalue())
+        channel.close_when_done()

--- Updated File http_date.py in package Zope2 --
--- http_date.py	2001/04/25 19:07:31	1.7
+++ http_date.py	2001/05/01 11:44:48	1.8
@@ -5,14 +5,14 @@
 import time
 
 def concat (*args):
-	return ''.join (args)	
-
+    return ''.join (args)
+    
 def join (seq, field=' '):
-	return field.join (seq)
-
+    return field.join (seq)
+    
 def group (s):
-	return '(' + s + ')'
-
+    return '(' + s + ')'
+    
 short_days = ['sun','mon','tue','wed','thu','fri','sat']
 long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']
 
@@ -21,17 +21,17 @@
 
 daymap = {}
 for i in range(7):
-	daymap[short_days[i]] = i
-	daymap[long_days[i]] = i
-
+    daymap[short_days[i]] = i
+    daymap[long_days[i]] = i
+    
 hms_reg = join (3 * [group('[0-9][0-9]')], ':')
 
 months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
 
 monmap = {}
 for i in range(12):
-	monmap[months[i]] = i+1
-
+    monmap[months[i]] = i+1
+    
 months_reg = group (join (months, '|'))
 
 # From draft-ietf-http-v11-spec-07.txt/3.3.1
@@ -41,86 +41,86 @@
 
 # rfc822 format
 rfc822_date = join (
-	[concat (short_day_reg,','),	# day
-	 group('[0-9][0-9]?'),			# date
-	 months_reg,					# month
-	 group('[0-9]+'),				# year
-	 hms_reg,						# hour minute second
-	 'gmt'
-	 ],
-	' '
-	)
+        [concat (short_day_reg,','),    # day
+         group('[0-9][0-9]?'),          # date
+         months_reg,                    # month
+         group('[0-9]+'),               # year
+         hms_reg,                       # hour minute second
+         'gmt'
+         ],
+        ' '
+        )
 
 rfc822_reg = re.compile (rfc822_date)
 
 def unpack_rfc822 (m):
-	g = m.group
-	a = string.atoi
-	return (
-		a(g(4)),	   	# year
-		monmap[g(3)],	# month
-		a(g(2)),		# day
-		a(g(5)),		# hour
-		a(g(6)),		# minute
-		a(g(7)),		# second
-		0,
-		0,
-		0
-		)
-
-# rfc850 format
+    g = m.group
+    a = string.atoi
+    return (
+            a(g(4)),        # year
+            monmap[g(3)],   # month
+            a(g(2)),        # day
+            a(g(5)),        # hour
+            a(g(6)),        # minute
+            a(g(7)),        # second
+            0,
+            0,
+            0
+            )
+
+# rfc850 format
 rfc850_date = join (
-	[concat (long_day_reg,','),
-	 join (
-		 [group ('[0-9][0-9]?'),
-		  months_reg,
-		  group ('[0-9]+')
-		  ],
-		 '-'
-		 ),
-	 hms_reg,
-	 'gmt'
-	 ],
-	' '
-	)
+        [concat (long_day_reg,','),
+         join (
+                 [group ('[0-9][0-9]?'),
+                  months_reg,
+                  group ('[0-9]+')
+                  ],
+                 '-'
+                 ),
+         hms_reg,
+         'gmt'
+         ],
+        ' '
+        )
 
 rfc850_reg = re.compile (rfc850_date)
 # they actually unpack the same way
 def unpack_rfc850 (m):
-	g = m.group
-	a = string.atoi
-	return (
-		a(g(4)),	   	# year
-		monmap[g(3)],	# month
-		a(g(2)),		# day
-		a(g(5)),		# hour
-		a(g(6)),		# minute
-		a(g(7)),		# second
-		0,
-		0,
-		0
-		)
-
-# parsdate.parsedate	- ~700/sec.
-# parse_http_date    	- ~1333/sec.
-
+    g = m.group
+    a = string.atoi
+    return (
+            a(g(4)),        # year
+            monmap[g(3)],   # month
+            a(g(2)),        # day
+            a(g(5)),        # hour
+            a(g(6)),        # minute
+            a(g(7)),        # second
+            0,
+            0,
+            0
+            )
+
+# parsedate.parsedate - ~700/sec.
+# parse_http_date     - ~1333/sec.
+
 def build_http_date (when):
-	return time.strftime ('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(when))
-
+    return time.strftime ('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(when))
+    
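+# worked example: build_http_date (0) returns
+# 'Thu, 01 Jan 1970 00:00:00 GMT' (the epoch fell on a Thursday).
+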
 def parse_http_date (d):
-	d = string.lower (d)
-	tz = time.timezone
-	m = rfc850_reg.match (d)
-	if m and m.end() == len(d):
-		retval = int (time.mktime (unpack_rfc850(m)) - tz)
-	else:
-		m = rfc822_reg.match (d)
-		if m and m.end() == len(d):
-			retval = int (time.mktime (unpack_rfc822(m)) - tz)
-		else:
-			return 0
-	# Thanks to Craig Silverstein <csilvers@google.com> for pointing
-	# out the DST discrepancy
-	if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
-		retval = retval + (tz - time.altzone)
-	return retval
+    d = string.lower (d)
+    tz = time.timezone
+    m = rfc850_reg.match (d)
+    if m and m.end() == len(d):
+        retval = int (time.mktime (unpack_rfc850(m)) - tz)
+    else:
+        m = rfc822_reg.match (d)
+        if m and m.end() == len(d):
+            retval = int (time.mktime (unpack_rfc822(m)) - tz)
+        else:
+            return 0
+    # Thanks to Craig Silverstein <csilvers@google.com> for pointing
+    # out the DST discrepancy
+    if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
+        retval = retval + (tz - time.altzone)
+    return retval
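+
+# parse_http_date is meant to invert build_http_date, so
+# parse_http_date (build_http_date (t)) should equal int(t); strings
+# matching neither the rfc822 nor the rfc850 format yield 0.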

--- Updated File http_server.py in package Zope2 --
--- http_server.py	2001/04/30 14:38:40	1.23
+++ http_server.py	2001/05/01 11:44:48	1.24
@@ -38,755 +38,755 @@
 
 class http_request:
 
-	# default reply code
-	reply_code = 200
-
-	request_counter = counter()
-
-	# Whether to automatically use chunked encoding when
-	# 
-	#   HTTP version is 1.1
-	#   Content-Length is not set
-	#   Chunked encoding is not already in effect
-	#
-	# If your clients are having trouble, you might want to disable this.
-	use_chunked = 1
-	
-	# by default, this request object ignores user data.
-	collector = None
-
-	def __init__ (self, *args):
-		# unpack information about the request
-		(self.channel, self.request,
-		 self.command, self.uri, self.version,
-		 self.header) = args
-
-		self.outgoing = fifo()
-		self.reply_headers = {
-			'Server'	: 'Medusa/%s' % VERSION_STRING,
-			'Date'		: http_date.build_http_date (time.time())
-			}
-		self.request_number = http_request.request_counter.increment()
-		self._split_uri = None
-		self._header_cache = {}
-
-	# --------------------------------------------------
-	# reply header management
-	# --------------------------------------------------
-	def __setitem__ (self, key, value):
-		self.reply_headers[key] = value
-
-	def __getitem__ (self, key):
-		return self.reply_headers[key]
-
-	def has_key (self, key):
-		return self.reply_headers.has_key (key)
-
-	def build_reply_header (self):
-		return string.join (
-			[self.response(self.reply_code)] + map (
-				lambda x: '%s: %s' % x,
-				self.reply_headers.items()
-				),
-			'\r\n'
-			) + '\r\n\r\n'
-
-	# --------------------------------------------------
-	# split a uri
-	# --------------------------------------------------
-
-	# <path>;<params>?<query>#<fragment>
-	path_regex = re.compile (
-	#      path      params    query   fragment
-		r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?'
-		)
-
-	def split_uri (self):
-		if self._split_uri is None:
-			m = self.path_regex.match (self.uri)
-			if m.end() != len(self.uri):
-				raise ValueError, "Broken URI"
-			else:
-				self._split_uri = m.groups()
-		return self._split_uri
-
-	def get_header_with_regex (self, head_reg, group):
-		for line in self.header:
-			m = head_reg.match (line)
-			if m.end() == len(line):
-				return head_reg.group (group)
-		return ''
-
-	def get_header (self, header):
-		header = string.lower (header)
-		hc = self._header_cache
-		if not hc.has_key (header):
-			h = header + ': '
-			hl = len(h)
-			for line in self.header:
-				if string.lower (line[:hl]) == h:
-					r = line[hl:]
-					hc[header] = r
-					return r
-			hc[header] = None
-			return None
-		else:
-			return hc[header]
-
-	# --------------------------------------------------
-	# user data
-	# --------------------------------------------------
-
-	def collect_incoming_data (self, data):
-		if self.collector:
-			self.collector.collect_incoming_data (data)
-		else:
-			self.log_info(
-				'Dropping %d bytes of incoming request data' % len(data),
-				'warning'
-				)
-
-	def found_terminator (self):
-		if self.collector:
-			self.collector.found_terminator()
-		else:
-			self.log_info (
-				'Unexpected end-of-record for incoming request',
-				'warning'
-				)
-
-	def push (self, thing):
-		if type(thing) == type(''):
-			self.outgoing.push (producers.simple_producer (thing))
-		else:
-			self.outgoing.push (thing)
-
-	def response (self, code=200):
-		message = self.responses[code]
-		self.reply_code = code
-		return 'HTTP/%s %d %s' % (self.version, code, message)
-
-	def error (self, code):
-		self.reply_code = code
-		message = self.responses[code]
-		s = self.DEFAULT_ERROR_MESSAGE % {
-			'code': code,
-			'message': message,
-			}
-		self['Content-Length'] = len(s)
-		self['Content-Type'] = 'text/html'
-		# make an error reply
-		self.push (s)
-		self.done()
-
-	# can also be used for empty replies
-	reply_now = error
-
-	def done (self):
-		"finalize this transaction - send output to the http channel"
-
-		# ----------------------------------------
-		# persistent connection management
-		# ----------------------------------------
-
-		#  --- BUCKLE UP! ----
-
-		connection = string.lower (get_header (CONNECTION, self.header))
-
-		close_it = 0
-		wrap_in_chunking = 0
-
-		if self.version == '1.0':
-			if connection == 'keep-alive':
-				if not self.has_key ('Content-Length'):
-					close_it = 1
-				else:
-					self['Connection'] = 'Keep-Alive'
-			else:
-				close_it = 1
-		elif self.version == '1.1':
-			if connection == 'close':
-				close_it = 1
-			elif not self.has_key ('Content-Length'):
-				if self.has_key ('Transfer-Encoding'):
-					if not self['Transfer-Encoding'] == 'chunked':
-						close_it = 1
-				elif self.use_chunked:
-					self['Transfer-Encoding'] = 'chunked'
-					wrap_in_chunking = 1
-				else:
-					close_it = 1
-		elif self.version is None:
-			# Although we don't *really* support http/0.9 (because we'd have to
-			# use \r\n as a terminator, and it would just yuck up a lot of stuff)
-			# it's very common for developers to not want to type a version number
-			# when using telnet to debug a server.
-			close_it = 1
-					
-		outgoing_header = producers.simple_producer (self.build_reply_header())
-
-		if close_it:
-			self['Connection'] = 'close'
-
-		if wrap_in_chunking:
-			outgoing_producer = producers.chunked_producer (
-				producers.composite_producer (self.outgoing)
-				)
-			# prepend the header
-			outgoing_producer = producers.composite_producer (
-				fifo([outgoing_header, outgoing_producer])
-				)
-		else:
-			# prepend the header
-			self.outgoing.push_front (outgoing_header)
-			outgoing_producer = producers.composite_producer (self.outgoing)
-
-		# apply a few final transformations to the output
-		self.channel.push_with_producer (
-			# globbing gives us large packets
-			producers.globbing_producer (
-				# hooking lets us log the number of bytes sent
-				producers.hooked_producer (
-					outgoing_producer,
-					self.log
-					)
-				)
-			)
-
-		self.channel.current_request = None
-
-		if close_it:
-			self.channel.close_when_done()
-
-	def log_date_string (self, when):
-		return time.strftime (
-			'%d/%b/%Y:%H:%M:%S ',
-			time.gmtime(when)
-			) + tz_for_log
-
-	def log (self, bytes):
-		self.channel.server.logger.log (
-			self.channel.addr[0],
-			' - - [%s] "%s" %d %d\n' % (
-				self.log_date_string (time.time()),
-				self.request,
-				self.reply_code,
-				bytes
-				)
-			)
-
-	responses = {
-		100: "Continue",
-		101: "Switching Protocols",
-		200: "OK",
-		201: "Created",
-		202: "Accepted",
-		203: "Non-Authoritative Information",
-		204: "No Content",
-		205: "Reset Content",
-		206: "Partial Content",
-		300: "Multiple Choices",
-		301: "Moved Permanently",
-		302: "Moved Temporarily",
-		303: "See Other",
-		304: "Not Modified",
-		305: "Use Proxy",
-		400: "Bad Request",
-		401: "Unauthorized",
-		402: "Payment Required",
-		403: "Forbidden",
-		404: "Not Found",
-		405: "Method Not Allowed",
-		406: "Not Acceptable",
-		407: "Proxy Authentication Required",
-		408: "Request Time-out",
-		409: "Conflict",
-		410: "Gone",
-		411: "Length Required",
-		412: "Precondition Failed",
-		413: "Request Entity Too Large",
-		414: "Request-URI Too Large",
-		415: "Unsupported Media Type",
-		500: "Internal Server Error",
-		501: "Not Implemented",
-		502: "Bad Gateway",
-		503: "Service Unavailable",
-		504: "Gateway Time-out",
-		505: "HTTP Version not supported"
-		}
-
-	# Default error message
-	DEFAULT_ERROR_MESSAGE = string.join (
-		['<head>',
-		 '<title>Error response</title>',
-		 '</head>',
-		 '<body>',
-		 '<h1>Error response</h1>',
-		 '<p>Error code %(code)d.',
-		 '<p>Message: %(message)s.',
-		 '</body>',
-		 ''
-		 ],
-		'\r\n'
-		)
-
-
-# ===========================================================================
-#						 HTTP Channel Object
-# ===========================================================================
-
+    # default reply code
+    reply_code = 200
+    
+    request_counter = counter()
+    
+    # Whether to automatically use chunked encoding when
+    # 
+    #   HTTP version is 1.1
+    #   Content-Length is not set
+    #   Chunked encoding is not already in effect
+    #
+    # If your clients are having trouble, you might want to disable this.
+    use_chunked = 1
+    
+    # by default, this request object ignores user data.
+    collector = None
+    
+    def __init__ (self, *args):
+        # unpack information about the request
+        (self.channel, self.request,
+         self.command, self.uri, self.version,
+         self.header) = args
+        
+        self.outgoing = fifo()
+        self.reply_headers = {
+                'Server'	: 'Medusa/%s' % VERSION_STRING,
+                'Date'		: http_date.build_http_date (time.time())
+                }
+        self.request_number = http_request.request_counter.increment()
+        self._split_uri = None
+        self._header_cache = {}
+        
+    # --------------------------------------------------
+    # reply header management
+    # --------------------------------------------------
+    def __setitem__ (self, key, value):
+        self.reply_headers[key] = value
+        
+    def __getitem__ (self, key):
+        return self.reply_headers[key]
+        
+    def has_key (self, key):
+        return self.reply_headers.has_key (key)
+        
+    def build_reply_header (self):
+        return string.join (
+                [self.response(self.reply_code)] + map (
+                        lambda x: '%s: %s' % x,
+                        self.reply_headers.items()
+                        ),
+                '\r\n'
+                ) + '\r\n\r\n'
+        
+    # --------------------------------------------------
+    # split a uri
+    # --------------------------------------------------
+    
+    # <path>;<params>?<query>#<fragment>
+    path_regex = re.compile (
+    #      path      params    query   fragment
+            r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?'
+            )
+    
+    def split_uri (self):
+        if self._split_uri is None:
+            m = self.path_regex.match (self.uri)
+            if m.end() != len(self.uri):
+                raise ValueError, "Broken URI"
+            else:
+                self._split_uri = m.groups()
+        return self._split_uri
+        
+    def get_header_with_regex (self, head_reg, group):
+        for line in self.header:
+            m = head_reg.match (line)
+            if m and m.end() == len(line):
+                return m.group (group)
+        return ''
+        
+    def get_header (self, header):
+        header = string.lower (header)
+        hc = self._header_cache
+        if not hc.has_key (header):
+            h = header + ': '
+            hl = len(h)
+            for line in self.header:
+                if string.lower (line[:hl]) == h:
+                    r = line[hl:]
+                    hc[header] = r
+                    return r
+            hc[header] = None
+            return None
+        else:
+            return hc[header]
+            
+    # --------------------------------------------------
+    # user data
+    # --------------------------------------------------
+            
+    def collect_incoming_data (self, data):
+        if self.collector:
+            self.collector.collect_incoming_data (data)
+        else:
+            self.log_info(
+                    'Dropping %d bytes of incoming request data' % len(data),
+                    'warning'
+                    )
+            
+    def found_terminator (self):
+        if self.collector:
+            self.collector.found_terminator()
+        else:
+            self.log_info (
+                    'Unexpected end-of-record for incoming request',
+                    'warning'
+                    )
+            
+    def push (self, thing):
+        if type(thing) == type(''):
+            self.outgoing.push (producers.simple_producer (thing))
+        else:
+            self.outgoing.push (thing)
+            
+    def response (self, code=200):
+        message = self.responses[code]
+        self.reply_code = code
+        return 'HTTP/%s %d %s' % (self.version, code, message)
+        
+    def error (self, code):
+        self.reply_code = code
+        message = self.responses[code]
+        s = self.DEFAULT_ERROR_MESSAGE % {
+                'code': code,
+                'message': message,
+                }
+        self['Content-Length'] = len(s)
+        self['Content-Type'] = 'text/html'
+        # make an error reply
+        self.push (s)
+        self.done()
+        
+    # can also be used for empty replies
+    reply_now = error
+    
+    def done (self):
+        "finalize this transaction - send output to the http channel"
+        
+        # ----------------------------------------
+        # persistent connection management
+        # ----------------------------------------
+        
+        #  --- BUCKLE UP! ----
+        
+        connection = string.lower (get_header (CONNECTION, self.header))
+        
+        close_it = 0
+        wrap_in_chunking = 0
+        
+        if self.version == '1.0':
+            if connection == 'keep-alive':
+                if not self.has_key ('Content-Length'):
+                    close_it = 1
+                else:
+                    self['Connection'] = 'Keep-Alive'
+            else:
+                close_it = 1
+        elif self.version == '1.1':
+            if connection == 'close':
+                close_it = 1
+            elif not self.has_key ('Content-Length'):
+                if self.has_key ('Transfer-Encoding'):
+                    if not self['Transfer-Encoding'] == 'chunked':
+                        close_it = 1
+                elif self.use_chunked:
+                    self['Transfer-Encoding'] = 'chunked'
+                    wrap_in_chunking = 1
+                else:
+                    close_it = 1
+        elif self.version is None:
+            # Although we don't *really* support http/0.9 (because we'd have to
+            # use \r\n as a terminator, and it would just yuck up a lot of stuff)
+            # it's very common for developers to not want to type a version number
+            # when using telnet to debug a server.
+            close_it = 1
+            
+        outgoing_header = producers.simple_producer (self.build_reply_header())
+        
+        if close_it:
+            self['Connection'] = 'close'
+            
+        if wrap_in_chunking:
+            outgoing_producer = producers.chunked_producer (
+                    producers.composite_producer (self.outgoing)
+                    )
+            # prepend the header
+            outgoing_producer = producers.composite_producer (
+                    fifo([outgoing_header, outgoing_producer])
+                    )
+        else:
+            # prepend the header
+            self.outgoing.push_front (outgoing_header)
+            outgoing_producer = producers.composite_producer (self.outgoing)
+            
+        # apply a few final transformations to the output
+        self.channel.push_with_producer (
+                # globbing gives us large packets
+                producers.globbing_producer (
+                        # hooking lets us log the number of bytes sent
+                        producers.hooked_producer (
+                                outgoing_producer,
+                                self.log
+                                )
+                        )
+                )
+        
+        self.channel.current_request = None
+        
+        if close_it:
+            self.channel.close_when_done()
+            
+    def log_date_string (self, when):
+        return time.strftime (
+                '%d/%b/%Y:%H:%M:%S ',
+                time.gmtime(when)
+                ) + tz_for_log
+        
+    def log (self, bytes):
+        self.channel.server.logger.log (
+                self.channel.addr[0],
+                ' - - [%s] "%s" %d %d\n' % (
+                        self.log_date_string (time.time()),
+                        self.request,
+                        self.reply_code,
+                        bytes
+                        )
+                )
+        
+    responses = {
+            100: "Continue",
+            101: "Switching Protocols",
+            200: "OK",
+            201: "Created",
+            202: "Accepted",
+            203: "Non-Authoritative Information",
+            204: "No Content",
+            205: "Reset Content",
+            206: "Partial Content",
+            300: "Multiple Choices",
+            301: "Moved Permanently",
+            302: "Moved Temporarily",
+            303: "See Other",
+            304: "Not Modified",
+            305: "Use Proxy",
+            400: "Bad Request",
+            401: "Unauthorized",
+            402: "Payment Required",
+            403: "Forbidden",
+            404: "Not Found",
+            405: "Method Not Allowed",
+            406: "Not Acceptable",
+            407: "Proxy Authentication Required",
+            408: "Request Time-out",
+            409: "Conflict",
+            410: "Gone",
+            411: "Length Required",
+            412: "Precondition Failed",
+            413: "Request Entity Too Large",
+            414: "Request-URI Too Large",
+            415: "Unsupported Media Type",
+            500: "Internal Server Error",
+            501: "Not Implemented",
+            502: "Bad Gateway",
+            503: "Service Unavailable",
+            504: "Gateway Time-out",
+            505: "HTTP Version not supported"
+            }
+    
+    # Default error message
+    DEFAULT_ERROR_MESSAGE = string.join (
+            ['<head>',
+             '<title>Error response</title>',
+             '</head>',
+             '<body>',
+             '<h1>Error response</h1>',
+             '<p>Error code %(code)d.',
+             '<p>Message: %(message)s.',
+             '</body>',
+             ''
+             ],
+            '\r\n'
+            )
+    
+    
+# ===========================================================================
+#                          HTTP Channel Object
+# ===========================================================================
+    
 class http_channel (asynchat.async_chat):
-
-	# use a larger default output buffer
-	ac_out_buffer_size = 1<<16
-
-	current_request = None
-	channel_counter = counter()
-
-	def __init__ (self, server, conn, addr):
-		self.channel_number = http_channel.channel_counter.increment()
-		self.request_counter = counter()
-		asynchat.async_chat.__init__ (self, conn)
-		self.server = server
-		self.addr = addr
-		self.set_terminator ('\r\n\r\n')
-		self.in_buffer = ''
-		self.creation_time = int (time.time())
-		self.check_maintenance()
-
-	def __repr__ (self):
-		ar = asynchat.async_chat.__repr__(self)[1:-1]
-		return '<%s channel#: %s requests:%s>' % (
-			ar,
-			self.channel_number,
-			self.request_counter
-			)
-
-	# Channel Counter, Maintenance Interval...
-	maintenance_interval = 500
-
-	def check_maintenance (self):
-		if not self.channel_number % self.maintenance_interval:
-			self.maintenance()
-
-	def maintenance (self):
-		self.kill_zombies()
-
-	# 30-minute zombie timeout.  status_handler also knows how to kill zombies.
-	zombie_timeout = 30 * 60
-
-	def kill_zombies (self):
-		now = int (time.time())
-		for channel in asyncore.socket_map.values():
-			if channel.__class__ == self.__class__:
-				if (now - channel.creation_time) > channel.zombie_timeout:
-					channel.close()
-
-	# --------------------------------------------------
-	# send/recv overrides, good place for instrumentation.
-	# --------------------------------------------------
-	
-	# this information needs to get into the request object,
-	# so that it may log correctly.
-	def send (self, data):
-		result = asynchat.async_chat.send (self, data)
-		self.server.bytes_out.increment (len(data))
-		return result
-
-	def recv (self, buffer_size):
-		try:
-			result = asynchat.async_chat.recv (self, buffer_size)
-			self.server.bytes_in.increment (len(result))
-			return result
-		except MemoryError:
-			# --- Save a Trip to Your Service Provider ---
-			# It's possible for a process to eat up all the memory of
-			# the machine, and put it in an extremely wedged state,
-			# where medusa keeps running and can't be shut down.  This
-			# is where MemoryError tends to get thrown, though of
-			# course it could get thrown elsewhere.
-			sys.exit ("Out of Memory!")
-
-	def handle_error (self):
-		t, v = sys.exc_info()[:2]
-		if t is SystemExit:
-			raise t, v
-		else:
-			asynchat.async_chat.handle_error (self)
-
-	def log (self, *args):
-		pass
-
-	# --------------------------------------------------
-	# async_chat methods
-	# --------------------------------------------------
-
-	def collect_incoming_data (self, data):
-		if self.current_request:
-			# we are receiving data (probably POST data) for a request
-			self.current_request.collect_incoming_data (data)
-		else:
-			# we are receiving header (request) data
-			self.in_buffer = self.in_buffer + data
 
-	def found_terminator (self):
-		if self.current_request:
-			self.current_request.found_terminator()
-		else:
-			header = self.in_buffer
-			self.in_buffer = ''
-			lines = string.split (header, '\r\n')
-
-			# --------------------------------------------------
-			# crack the request header
-			# --------------------------------------------------
-
-			while lines and not lines[0]:
-				# as per the suggestion of http-1.1 section 4.1, (and
-				# Eric Parker <eparker@zyvex.com>), ignore a leading
-				# blank lines (buggy browsers tack it onto the end of
-				# POST requests)
-				lines = lines[1:]
-
-			if not lines:
-				self.close_when_done()
-				return
-
-			request = lines[0]
-
-
-
-			command, uri, version = crack_request (request)
-
-			# unquote path if necessary (thanks to Skip Montaro for pointing
-			# out that we must unquote in piecemeal fashion).
+    # use a larger default output buffer
+    ac_out_buffer_size = 1<<16
+    
+    current_request = None
+    channel_counter = counter()
+    
+    def __init__ (self, server, conn, addr):
+        self.channel_number = http_channel.channel_counter.increment()
+        self.request_counter = counter()
+        asynchat.async_chat.__init__ (self, conn)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n\r\n')
+        self.in_buffer = ''
+        self.creation_time = int (time.time())
+        self.check_maintenance()
+        
+    def __repr__ (self):
+        ar = asynchat.async_chat.__repr__(self)[1:-1]
+        return '<%s channel#: %s requests:%s>' % (
+                ar,
+                self.channel_number,
+                self.request_counter
+                )
+        
+    # Channel Counter, Maintenance Interval...
+    maintenance_interval = 500
+    
+    def check_maintenance (self):
+        if not self.channel_number % self.maintenance_interval:
+            self.maintenance()
+            
+    def maintenance (self):
+        self.kill_zombies()
+        
+    # 30-minute zombie timeout.  status_handler also knows how to kill zombies.
+    zombie_timeout = 30 * 60
+    
+    def kill_zombies (self):
+        now = int (time.time())
+        for channel in asyncore.socket_map.values():
+            if channel.__class__ == self.__class__:
+                if (now - channel.creation_time) > channel.zombie_timeout:
+                    channel.close()
+                    
+    # --------------------------------------------------
+    # send/recv overrides, good place for instrumentation.
+    # --------------------------------------------------
+    
+    # this information needs to get into the request object,
+    # so that it may log correctly.
+    def send (self, data):
+        result = asynchat.async_chat.send (self, data)
+        self.server.bytes_out.increment (len(data))
+        return result
+        
+    def recv (self, buffer_size):
+        try:
+            result = asynchat.async_chat.recv (self, buffer_size)
+            self.server.bytes_in.increment (len(result))
+            return result
+        except MemoryError:
+            # --- Save a Trip to Your Service Provider ---
+            # It's possible for a process to eat up all the memory of
+            # the machine, and put it in an extremely wedged state,
+            # where medusa keeps running and can't be shut down.  This
+            # is where MemoryError tends to get thrown, though of
+            # course it could get thrown elsewhere.
+            sys.exit ("Out of Memory!")
+            
+    def handle_error (self):
+        t, v = sys.exc_info()[:2]
+        if t is SystemExit:
+            raise t, v
+        else:
+            asynchat.async_chat.handle_error (self)
+            
+    def log (self, *args):
+        pass
+        
+    # --------------------------------------------------
+    # async_chat methods
+    # --------------------------------------------------
+        
+    def collect_incoming_data (self, data):
+        if self.current_request:
+            # we are receiving data (probably POST data) for a request
+            self.current_request.collect_incoming_data (data)
+        else:
+            # we are receiving header (request) data
+            self.in_buffer = self.in_buffer + data
+            
+    def found_terminator (self):
+        if self.current_request:
+            self.current_request.found_terminator()
+        else:
+            header = self.in_buffer
+            self.in_buffer = ''
+            lines = string.split (header, '\r\n')
+            
+            # --------------------------------------------------
+            # crack the request header
+            # --------------------------------------------------
+            
+            while lines and not lines[0]:
+                # as per the suggestion of http-1.1 section 4.1 (and
+                # Eric Parker <eparker@zyvex.com>), ignore leading
+                # blank lines (buggy browsers tack them onto the end of
+                # POST requests)
+                lines = lines[1:]
+                
+            if not lines:
+                self.close_when_done()
+                return
+                
+            request = lines[0]
+            
+            
+            
+            command, uri, version = crack_request (request)
+            
+            # unquote path if necessary (thanks to Skip Montanaro for pointing
+            # out that we must unquote in piecemeal fashion).
             # ajung: unquote the request *after* calling crack_request because
             # this function breaks when it gets an unquoted request
-
-			if '%' in request:
-				request = unquote (request)
-
-
-			header = join_headers (lines[1:])
-
-			r = http_request (self, request, command, uri, version, header)
-			self.request_counter.increment()
-			self.server.total_requests.increment()
-
-			if command is None:
-				self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
-				r.error (400)
-				return
-
-			# --------------------------------------------------
-			# handler selection and dispatch
-			# --------------------------------------------------
-			for h in self.server.handlers:
-				if h.match (r):
-					try:
-						self.current_request = r
-						# This isn't used anywhere.
-						# r.handler = h # CYCLE
-						h.handle_request (r)
-					except:
-						self.server.exceptions.increment()
-						(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
-						self.log_info(
-								'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
-								'error')
-						try:
-							r.error (500)
-						except:
-							pass
-					return
-
-			# no handlers, so complain
-			r.error (404)
-
-	def writable (self):
-		# this is just the normal async_chat 'writable', here for comparison
-		return self.ac_out_buffer or len(self.producer_fifo)
-
-	def writable_for_proxy (self):
-		# this version of writable supports the idea of a 'stalled' producer
-		# [i.e., it's not ready to produce any output yet] This is needed by
-		# the proxy, which will be waiting for the magic combination of
-		# 1) hostname resolved
-		# 2) connection made
-		# 3) data available.
-		if self.ac_out_buffer:
-			return 1
-		elif len(self.producer_fifo):
-			p = self.producer_fifo.first()
-			if hasattr (p, 'stalled'):
-				return not p.stalled()
-			else:
-				return 1
-
-# ===========================================================================
-#						 HTTP Server Object
-# ===========================================================================
-
+            
+            if '%' in request:
+                request = unquote (request)
+                
+                
+            header = join_headers (lines[1:])
+            
+            r = http_request (self, request, command, uri, version, header)
+            self.request_counter.increment()
+            self.server.total_requests.increment()
+            
+            if command is None:
+                self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
+                r.error (400)
+                return
+                
+            # --------------------------------------------------
+            # handler selection and dispatch
+            # --------------------------------------------------
+            for h in self.server.handlers:
+                if h.match (r):
+                    try:
+                        self.current_request = r
+                        # This isn't used anywhere.
+                        # r.handler = h # CYCLE
+                        h.handle_request (r)
+                    except:
+                        self.server.exceptions.increment()
+                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
+                        self.log_info(
+                                        'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
+                                        'error')
+                        try:
+                            r.error (500)
+                        except:
+                            pass
+                    return
+                    
+            # no handlers, so complain
+            r.error (404)
+            
+    def writable (self):
+        # this is just the normal async_chat 'writable', here for comparison
+        return self.ac_out_buffer or len(self.producer_fifo)
+        
+    def writable_for_proxy (self):
+        # this version of writable supports the idea of a 'stalled' producer
+        # [i.e., it's not ready to produce any output yet].  This is needed by
+        # the proxy, which will be waiting for the magic combination of
+        # 1) hostname resolved
+        # 2) connection made
+        # 3) data available.
+        if self.ac_out_buffer:
+            return 1
+        elif len(self.producer_fifo):
+            p = self.producer_fifo.first()
+            if hasattr (p, 'stalled'):
+                return not p.stalled()
+            else:
+                return 1
+                
+# ===========================================================================
+#                          HTTP Server Object
+# ===========================================================================
+                
 class http_server (asyncore.dispatcher):
-
-	SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING
-
-	channel_class = http_channel
-
-	def __init__ (self, ip, port, resolver=None, logger_object=None):
-		self.ip = ip
-		self.port = port
-		asyncore.dispatcher.__init__ (self)
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-
-		self.handlers = []
-
-		if not logger_object:
-			logger_object = logger.file_logger (sys.stdout)
-
-		self.set_reuse_addr()
-		self.bind ((ip, port))
-
-		# lower this to 5 if your OS complains
-		self.listen (1024)
 
-		host, port = self.socket.getsockname()
-		if not ip:
-			self.log_info('Computing default hostname', 'warning')
-			ip = socket.gethostbyname (socket.gethostname())
-		try:
-			self.server_name = socket.gethostbyaddr (ip)[0]
-		except socket.error:
-			self.log_info('Cannot do reverse lookup', 'warning')
-			self.server_name = ip       # use the IP address as the "hostname"
-
-		self.server_port = port
-		self.total_clients = counter()
-		self.total_requests = counter()
-		self.exceptions = counter()
-		self.bytes_out = counter()
-		self.bytes_in  = counter()
-
-		if not logger_object:
-			logger_object = logger.file_logger (sys.stdout)
-
-		if resolver:
-			self.logger = logger.resolving_logger (resolver, logger_object)
-		else:
-			self.logger = logger.unresolving_logger (logger_object)
-
-		self.log_info (
-			'Medusa (V%s) started at %s'
-			'\n\tHostname: %s'
-			'\n\tPort:%d'
-			'\n' % (
-				VERSION_STRING,
-				time.ctime(time.time()),
-				self.server_name,
-				port,
-				)
-			)
-
-	def writable (self):
-		return 0
-
-	def handle_read (self):
-		pass
-
-	def readable (self):
-		return self.accepting
-
-	def handle_connect (self):
-		pass
-
-	def handle_accept (self):
-		self.total_clients.increment()
-		try:
-			conn, addr = self.accept()
-		except socket.error:
-			# linux: on rare occasions we get a bogus socket back from
-			# accept.  socketmodule.c:makesockaddr complains that the
-			# address family is unknown.  We don't want the whole server
-			# to shut down because of this.
-			self.log_info ('warning: server accept() threw an exception', 'warning')
-			return
-		except TypeError:
-			# unpack non-sequence.  this can happen when a read event
-			# fires on a listening socket, but when we call accept()
-			# we get EWOULDBLOCK, so dispatcher.accept() returns None.
-			# Seen on FreeBSD3.
-			self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
-			return
-
-		self.channel_class (self, conn, addr)
-
-	def install_handler (self, handler, back=0):
-		if back:
-			self.handlers.append (handler)
-		else:
-			self.handlers.insert (0, handler)
-
-	def remove_handler (self, handler):
-		self.handlers.remove (handler)
-
-	def status (self):
-		def nice_bytes (n):
-			return string.join (status_handler.english_bytes (n))
-
-		handler_stats = filter (None, map (maybe_status, self.handlers))
-		
-		if self.total_clients:
-			ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
-		else:
-			ratio = 0.0
-		
-		return producers.composite_producer (
-			fifo ([producers.lines_producer (
-				['<h2>%s</h2>'							% self.SERVER_IDENT,
-				'<br>Listening on: <b>Host:</b> %s'		% self.server_name,
-				'<b>Port:</b> %d'						% self.port,
-				 '<p><ul>'
-				 '<li>Total <b>Clients:</b> %s'			% self.total_clients,
-				 '<b>Requests:</b> %s'					% self.total_requests,
-				 '<b>Requests/Client:</b> %.1f'			% (ratio),
-				 '<li>Total <b>Bytes In:</b> %s'	% (nice_bytes (self.bytes_in.as_long())),
-				 '<b>Bytes Out:</b> %s'				% (nice_bytes (self.bytes_out.as_long())),
-				 '<li>Total <b>Exceptions:</b> %s'		% self.exceptions,
-				 '</ul><p>'
-				 '<b>Extension List</b><ul>',
-				 ])] + handler_stats + [producers.simple_producer('</ul>')]
-				  )
-			)
-
+    SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING
+    
+    channel_class = http_channel
+    
+    def __init__ (self, ip, port, resolver=None, logger_object=None):
+        self.ip = ip
+        self.port = port
+        asyncore.dispatcher.__init__ (self)
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        
+        self.handlers = []
+        
+        if not logger_object:
+            logger_object = logger.file_logger (sys.stdout)
+            
+        self.set_reuse_addr()
+        self.bind ((ip, port))
+        
+        # lower this to 5 if your OS complains
+        self.listen (1024)
+        
+        host, port = self.socket.getsockname()
+        if not ip:
+            self.log_info('Computing default hostname', 'warning')
+            ip = socket.gethostbyname (socket.gethostname())
+        try:
+            self.server_name = socket.gethostbyaddr (ip)[0]
+        except socket.error:
+            self.log_info('Cannot do reverse lookup', 'warning')
+            self.server_name = ip       # use the IP address as the "hostname"
+            
+        self.server_port = port
+        self.total_clients = counter()
+        self.total_requests = counter()
+        self.exceptions = counter()
+        self.bytes_out = counter()
+        self.bytes_in  = counter()
+        
+        if not logger_object:
+            logger_object = logger.file_logger (sys.stdout)
+            
+        if resolver:
+            self.logger = logger.resolving_logger (resolver, logger_object)
+        else:
+            self.logger = logger.unresolving_logger (logger_object)
+            
+        self.log_info (
+                'Medusa (V%s) started at %s'
+                '\n\tHostname: %s'
+                '\n\tPort:%d'
+                '\n' % (
+                        VERSION_STRING,
+                        time.ctime(time.time()),
+                        self.server_name,
+                        port,
+                        )
+                )
+        
+    def writable (self):
+        return 0
+        
+    def handle_read (self):
+        pass
+        
+    def readable (self):
+        return self.accepting
+        
+    def handle_connect (self):
+        pass
+        
+    def handle_accept (self):
+        self.total_clients.increment()
+        try:
+            conn, addr = self.accept()
+        except socket.error:
+            # linux: on rare occasions we get a bogus socket back from
+            # accept.  socketmodule.c:makesockaddr complains that the
+            # address family is unknown.  We don't want the whole server
+            # to shut down because of this.
+            self.log_info ('warning: server accept() threw an exception', 'warning')
+            return
+        except TypeError:
+            # unpack non-sequence.  this can happen when a read event
+            # fires on a listening socket, but when we call accept()
+            # we get EWOULDBLOCK, so dispatcher.accept() returns None.
+            # Seen on FreeBSD3.
+            self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
+            return
+            
+        self.channel_class (self, conn, addr)
+        
+    def install_handler (self, handler, back=0):
+        if back:
+            self.handlers.append (handler)
+        else:
+            self.handlers.insert (0, handler)
+            
+    def remove_handler (self, handler):
+        self.handlers.remove (handler)
+        
+    def status (self):
+        def nice_bytes (n):
+            return string.join (status_handler.english_bytes (n))
+            
+        handler_stats = filter (None, map (maybe_status, self.handlers))
+        
+        if self.total_clients:
+            ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
+        else:
+            ratio = 0.0
+            
+        return producers.composite_producer (
+                fifo ([producers.lines_producer (
+                        ['<h2>%s</h2>'							% self.SERVER_IDENT,
+                        '<br>Listening on: <b>Host:</b> %s'		% self.server_name,
+                        '<b>Port:</b> %d'						% self.port,
+                         '<p><ul>'
+                         '<li>Total <b>Clients:</b> %s'			% self.total_clients,
+                         '<b>Requests:</b> %s'					% self.total_requests,
+                         '<b>Requests/Client:</b> %.1f'			% (ratio),
+                         '<li>Total <b>Bytes In:</b> %s'	% (nice_bytes (self.bytes_in.as_long())),
+                         '<b>Bytes Out:</b> %s'				% (nice_bytes (self.bytes_out.as_long())),
+                         '<li>Total <b>Exceptions:</b> %s'		% self.exceptions,
+                         '</ul><p>'
+                         '<b>Extension List</b><ul>',
+                         ])] + handler_stats + [producers.simple_producer('</ul>')]
+                          )
+                )
+        
 def maybe_status (thing):
-	if hasattr (thing, 'status'):
-		return thing.status()
-	else:
-		return None
-
+    if hasattr (thing, 'status'):
+        return thing.status()
+    else:
+        return None
+        
 CONNECTION = re.compile ('Connection: (.*)', re.IGNORECASE)
 
 # merge multi-line headers
 # [486dx2: ~500/sec]
 def join_headers (headers):
-	r = []
-	for i in range(len(headers)):
-		if headers[i][0] in ' \t':	
-			r[-1] = r[-1] + headers[i][1:]
-		else:
-			r.append (headers[i])
-	return r
-
+    r = []
+    for i in range(len(headers)):
+        if headers[i][0] in ' \t':
+            r[-1] = r[-1] + headers[i][1:]
+        else:
+            r.append (headers[i])
+    return r
+    
 def get_header (head_reg, lines, group=1):
-	for line in lines:
-		m = head_reg.match (line)
-		if m and m.end() == len(line):
-			return m.group (group)
-	return ''
-
+    for line in lines:
+        m = head_reg.match (line)
+        if m and m.end() == len(line):
+            return m.group (group)
+    return ''
+    
 def get_header_match (head_reg, lines):
-	for line in lines:
-		m = head_reg.match (line)
-		if m and m.end() == len(line):
-			return m
-	return ''
-
+    for line in lines:
+        m = head_reg.match (line)
+        if m and m.end() == len(line):
+            return m
+    return ''
+    
 REQUEST = re.compile ('([^ ]+) ([^ ]+)(( HTTP/([0-9.]+))$|$)')
 
 def crack_request (r):
-	m = REQUEST.match (r)
-	if m.end() == len(r):
-		if m.group(3):
-			version = m.group(5)
-		else:
-			version = None
-		return string.lower (m.group(1)), m.group(2), version
-	else:
-		return None, None, None
-
+    m = REQUEST.match (r)
+    if m and m.end() == len(r):
+        if m.group(3):
+            version = m.group(5)
+        else:
+            version = None
+        return string.lower (m.group(1)), m.group(2), version
+    else:
+        return None, None, None
+        
 class fifo:
-	def __init__ (self, list=None):
-		if not list:
-			self.list = []
-		else:
-			self.list = list
-		
-	def __len__ (self):
-		return len(self.list)
-
-	def first (self):
-		return self.list[0]
-
-	def push_front (self, object):
-		self.list.insert (0, object)
-
-	def push (self, data):
-		self.list.append (data)
-
-	def pop (self):
-		if self.list:
-			result = self.list[0]
-			del self.list[0]
-			return (1, result)
-		else:
-			return (0, None)
-
+    def __init__ (self, list=None):
+        if not list:
+            self.list = []
+        else:
+            self.list = list
+            
+    def __len__ (self):
+        return len(self.list)
+        
+    def first (self):
+        return self.list[0]
+        
+    def push_front (self, object):
+        self.list.insert (0, object)
+        
+    def push (self, data):
+        self.list.append (data)
+        
+    def pop (self):
+        if self.list:
+            result = self.list[0]
+            del self.list[0]
+            return (1, result)
+        else:
+            return (0, None)
+            
 def compute_timezone_for_log ():
-	if time.daylight:
-		tz = time.altzone
-	else:
-		tz = time.timezone
-	if tz > 0:
-		neg = 1
-	else:
-		neg = 0
-		tz = -tz
-	h, rem = divmod (tz, 3600)
-	m, rem = divmod (rem, 60)
-	if neg:
-		return '-%02d%02d' % (h, m)
-	else:
-		return '+%02d%02d' % (h, m)
-
-# if you run this program over a TZ change boundary, this will be invalid.
+    if time.daylight:
+        tz = time.altzone
+    else:
+        tz = time.timezone
+    if tz > 0:
+        neg = 1
+    else:
+        neg = 0
+        tz = -tz
+    h, rem = divmod (tz, 3600)
+    m, rem = divmod (rem, 60)
+    if neg:
+        return '-%02d%02d' % (h, m)
+    else:
+        return '+%02d%02d' % (h, m)
+        
+# if you run this program over a TZ change boundary, this will be invalid.
 tz_for_log = compute_timezone_for_log()
 
 if __name__ == '__main__':
-	import sys
-	if len(sys.argv) < 2:
-		print 'usage: %s <root> <port>' % (sys.argv[0])
-	else:
-		import monitor
-		import filesys
-		import default_handler
-		import status_handler
-		import ftp_server
-		import chat_server
-		import resolver
-		import logger
-		rs = resolver.caching_resolver ('127.0.0.1')
-		lg = logger.file_logger (sys.stdout)
-		ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
-		fs = filesys.os_filesystem (sys.argv[1])
-		dh = default_handler.default_handler (fs)
-		hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
-		hs.install_handler (dh)
-		ftp = ftp_server.ftp_server (
-			ftp_server.dummy_authorizer(sys.argv[1]),
-			port=8021,
-			resolver=rs,
-			logger_object=lg
-			)
-		cs = chat_server.chat_server ('', 7777)
-		sh = status_handler.status_extension([hs,ms,ftp,cs,rs])
-		hs.install_handler (sh)
-		if ('-p' in sys.argv):
-			def profile_loop ():
-				try:
-					asyncore.loop()
-				except KeyboardInterrupt:
-					pass
-			import profile
-			profile.run ('profile_loop()', 'profile.out')
-		else:
-			asyncore.loop()
+    import sys
+    if len(sys.argv) < 2:
+        print 'usage: %s <root> <port>' % (sys.argv[0])
+    else:
+        import monitor
+        import filesys
+        import default_handler
+        import status_handler
+        import ftp_server
+        import chat_server
+        import resolver
+        import logger
+        rs = resolver.caching_resolver ('127.0.0.1')
+        lg = logger.file_logger (sys.stdout)
+        ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
+        fs = filesys.os_filesystem (sys.argv[1])
+        dh = default_handler.default_handler (fs)
+        hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
+        hs.install_handler (dh)
+        ftp = ftp_server.ftp_server (
+                ftp_server.dummy_authorizer(sys.argv[1]),
+                port=8021,
+                resolver=rs,
+                logger_object=lg
+                )
+        cs = chat_server.chat_server ('', 7777)
+        sh = status_handler.status_extension([hs,ms,ftp,cs,rs])
+        hs.install_handler (sh)
+        if ('-p' in sys.argv):
+            def profile_loop ():
+                try:
+                    asyncore.loop()
+                except KeyboardInterrupt:
+                    pass
+            import profile
+            profile.run ('profile_loop()', 'profile.out')
+        else:
+            asyncore.loop()

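A note for anyone tracing the persistent-connection bookkeeping in
http_request.done() above: the whole "BUCKLE UP!" block reduces to a small
decision table.  The sketch below restates it as a standalone function;
connection_disposition and the plain dict standing in for self.reply_headers
are invented for illustration and are not part of the checked-in code.

    def connection_disposition (version, connection, reply_headers, use_chunked=1):
        "Return (close_it, wrap_in_chunking) for a reply about to be sent."
        close_it = 0
        wrap_in_chunking = 0
        if version == '1.0':
            # keep 1.0 connections open only on an explicit keep-alive
            # with a known Content-Length
            if connection == 'keep-alive' and reply_headers.has_key ('Content-Length'):
                reply_headers['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif version == '1.1':
            # 1.1 is persistent by default; without a Content-Length we
            # either chunk the body or close to mark its end
            if connection == 'close':
                close_it = 1
            elif not reply_headers.has_key ('Content-Length'):
                if reply_headers.has_key ('Transfer-Encoding'):
                    if reply_headers['Transfer-Encoding'] != 'chunked':
                        close_it = 1
                elif use_chunked:
                    reply_headers['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        else:
            # no version given (http/0.9-style telnet debugging): always close
            close_it = 1
        return close_it, wrap_in_chunking

For example, an HTTP/1.1 reply with no Content-Length comes back as
(0, 1): leave the connection open and wrap the body in chunked encoding.
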
--- Updated File logger.py in package Zope2 --
--- logger.py	2001/04/25 19:07:32	1.10
+++ logger.py	2001/05/01 11:44:48	1.11
@@ -32,231 +32,231 @@
 #  o	syslog output...
 
 class file_logger:
-			
-	# pass this either a path or a file object.
-	def __init__ (self, file, flush=1, mode='a'):
-		if type(file) == type(''):
-			if (file == '-'):
-				import sys
-				self.file = sys.stdout
-			else:
-				self.file = open (file, mode)
-		else:
-			self.file = file
-		self.do_flush = flush
 
-	def __repr__ (self):
-		return '<file logger: %s>' % self.file
-
-	def write (self, data):
-		self.file.write (data)
-		self.maybe_flush()
-		
-	def writeline (self, line):
-		self.file.writeline (line)
-		self.maybe_flush()
-		
-	def writelines (self, lines):
-		self.file.writelines (lines)
-		self.maybe_flush()
-
-	def maybe_flush (self):
-		if self.do_flush:
-			self.file.flush()
-
-	def flush (self):
-		self.file.flush()
-
-	def softspace (self, *args):
-		pass
-
-	def log (self, message):
-		if message[-1] not in ('\r', '\n'):
-			self.write (message + '\n')
-		else:
-			self.write (message)
-
-# like a file_logger, but it must be attached to a filename.
-# When the log gets too full, or a certain time has passed,
-# it backs up the log and starts a new one.  Note that backing
-# up the log is done via "mv" because anything else (cp, gzip)
-# would take time, during which medusa would do nothing else.
-
+    # pass this either a path or a file object.
+    def __init__ (self, file, flush=1, mode='a'):
+        if type(file) == type(''):
+            if (file == '-'):
+                import sys
+                self.file = sys.stdout
+            else:
+                self.file = open (file, mode)
+        else:
+            self.file = file
+        self.do_flush = flush
+        
+    def __repr__ (self):
+        return '<file logger: %s>' % self.file
+        
+    def write (self, data):
+        self.file.write (data)
+        self.maybe_flush()
+        
+    def writeline (self, line):
+        self.file.writeline (line)
+        self.maybe_flush()
+        
+    def writelines (self, lines):
+        self.file.writelines (lines)
+        self.maybe_flush()
+        
+    def maybe_flush (self):
+        if self.do_flush:
+            self.file.flush()
+            
+    def flush (self):
+        self.file.flush()
+        
+    def softspace (self, *args):
+        pass
+        
+    def log (self, message):
+        if message[-1] not in ('\r', '\n'):
+            self.write (message + '\n')
+        else:
+            self.write (message)
+            
+# like a file_logger, but it must be attached to a filename.
+# When the log gets too full, or a certain time has passed,
+# it backs up the log and starts a new one.  Note that backing
+# up the log is done via "mv" because anything else (cp, gzip)
+# would take time, during which medusa would do nothing else.
+            
 class rotating_file_logger (file_logger):
-			
-	# If freq is non-None we back up "daily", "weekly", or "monthly".
-	# Else if maxsize is non-None we back up whenever the log gets
-	# to big.  If both are None we never back up.
-	def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
-		self.filename = file
-		self.mode = mode
-		self.file = open (file, mode)
-		self.freq = freq
-		self.maxsize = maxsize
-		self.rotate_when = self.next_backup(self.freq)
-		self.do_flush = flush
-
-	def __repr__ (self):
-		return '<rotating-file logger: %s>' % self.file
-
-	# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
-	def next_backup (self, freq):
-		(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
-		if freq == 'daily':
-			return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
-		elif freq == 'weekly':
-			return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1)  # wd(monday)==0
-		elif freq == 'monthly':
-			return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
-		else:
-			return None                  # not a date-based backup
 
-	def maybe_flush (self):              # rotate first if necessary
-		self.maybe_rotate()
-		if self.do_flush:                # from file_logger()
-			self.file.flush()
-
-	def maybe_rotate (self):
-		if self.freq and time.time() > self.rotate_when:
-			self.rotate()
-			self.rotate_when = self.next_backup(self.freq)
-		elif self.maxsize:               # rotate when we get too big
-			try:
-				if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
-					self.rotate()
- 			except os.error:             # file not found, probably
-				self.rotate()            # will create a new file
-
-	def rotate (self):
-		(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
-		try:
-			self.file.close()
-			newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
-			try:
-				open(newname, "r").close()      # check if file exists
-				newname = newname + "-%02d%02d%02d" % (hr, min, sec)
-			except:                             # YEARMODY is unique
-				pass
-			os.rename(self.filename, newname)
-			self.file = open(self.filename, self.mode)
-		except:
-			pass
-
-# syslog is a line-oriented log protocol - this class would be
-# appropriate for FTP or HTTP logs, but not for dumping stderr to.
-
-# TODO: a simple safety wrapper that will ensure that the line sent
-# to syslog is reasonable.
-
-# TODO: async version of syslog_client: now, log entries use blocking
-# send()
-
+    # If freq is non-None we back up "daily", "weekly", or "monthly".
+    # Else if maxsize is non-None we back up whenever the log gets
+    # too big.  If both are None we never back up.
+    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
+        self.filename = file
+        self.mode = mode
+        self.file = open (file, mode)
+        self.freq = freq
+        self.maxsize = maxsize
+        self.rotate_when = self.next_backup(self.freq)
+        self.do_flush = flush
+        
+    def __repr__ (self):
+        return '<rotating-file logger: %s>' % self.file
+        
+    # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
+    def next_backup (self, freq):
+        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+        if freq == 'daily':
+            return time.mktime((yr,mo,day+1, 0,0,0, 0,0,-1))
+        elif freq == 'weekly':
+            return time.mktime((yr,mo,day-wd+7, 0,0,0, 0,0,-1))  # wd(monday)==0
+        elif freq == 'monthly':
+            return time.mktime((yr,mo+1,1, 0,0,0, 0,0,-1))
+        else:
+            return None                  # not a date-based backup
+            
+    def maybe_flush (self):              # rotate first if necessary
+        self.maybe_rotate()
+        if self.do_flush:                # from file_logger()
+            self.file.flush()
+            
+    def maybe_rotate (self):
+        if self.freq and time.time() > self.rotate_when:
+            self.rotate()
+            self.rotate_when = self.next_backup(self.freq)
+        elif self.maxsize:               # rotate when we get too big
+            try:
+                if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
+                    self.rotate()
+            except os.error:             # file not found, probably
+                self.rotate()            # will create a new file
+                
+    def rotate (self):
+        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+        try:
+            self.file.close()
+            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
+            try:
+                open(newname, "r").close()      # check if file exists
+                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
+            except:                             # YEARMODY is unique
+                pass
+            os.rename(self.filename, newname)
+            self.file = open(self.filename, self.mode)
+        except:
+            pass
+            
+# syslog is a line-oriented log protocol - this class would be
+# appropriate for FTP or HTTP logs, but not for dumping stderr to.
+
+# TODO: a simple safety wrapper that will ensure that the line sent
+# to syslog is reasonable.
+
+# TODO: async version of syslog_client: now, log entries use blocking
+# send()
+            
 import m_syslog
 syslog_logger = m_syslog.syslog_client
 
 class syslog_logger (m_syslog.syslog_client):
-	def __init__ (self, address, facility='user'):
-		m_syslog.syslog_client.__init__ (self, address)
-		self.facility = m_syslog.facility_names[facility]
-		self.address=address
-
-	def __repr__ (self):
-		return '<syslog logger address=%s>' % (repr(self.address))
-
-	def log (self, message):
-		m_syslog.syslog_client.log (
-			self,
-			message,
-			facility=self.facility,
-			priority=m_syslog.LOG_INFO
-			)
-
-# log to a stream socket, asynchronously
-
+    def __init__ (self, address, facility='user'):
+        m_syslog.syslog_client.__init__ (self, address)
+        self.facility = m_syslog.facility_names[facility]
+        self.address=address
+        
+    def __repr__ (self):
+        return '<syslog logger address=%s>' % (repr(self.address))
+        
+    def log (self, message):
+        m_syslog.syslog_client.log (
+                self,
+                message,
+                facility=self.facility,
+                priority=m_syslog.LOG_INFO
+                )
+        
+# log to a stream socket, asynchronously
+        
 class socket_logger (asynchat.async_chat):
-
-	def __init__ (self, address):
-
-		if type(address) == type(''):
-			self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
-		else:
-			self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-
-		self.connect (address)
-		self.address = address
-		
-	def __repr__ (self):
-		return '<socket logger: address=%s>' % (self.address)
-
-	def log (self, message):
-		if message[-2:] != '\r\n':
-			self.socket.push (message + '\r\n')
-		else:
-			self.socket.push (message)
 
-# log to multiple places
+    def __init__ (self, address):
+    
+        if type(address) == type(''):
+            self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
+        else:
+            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+            
+        self.connect (address)
+        self.address = address
+        
+    def __repr__ (self):
+        return '<socket logger: address=%s>' % (self.address)
+        
+    def log (self, message):
+        if message[-2:] != '\r\n':
+            self.push (message + '\r\n')
+        else:
+            self.push (message)
+            
+# log to multiple places
 class multi_logger:
-	def __init__ (self, loggers):
-		self.loggers = loggers
-
-	def __repr__ (self):
-		return '<multi logger: %s>' % (repr(self.loggers))
-
-	def log (self, message):
-		for logger in self.loggers:
-			logger.log (message)
-
+    def __init__ (self, loggers):
+        self.loggers = loggers
+        
+    def __repr__ (self):
+        return '<multi logger: %s>' % (repr(self.loggers))
+        
+    def log (self, message):
+        for logger in self.loggers:
+            logger.log (message)
+            
 class resolving_logger:
-	"""Feed (ip, message) combinations into this logger to get a
-	resolved hostname in front of the message.  The message will not
-	be logged until the PTR request finishes (or fails)."""
-
-	def __init__ (self, resolver, logger):
-		self.resolver = resolver
-		self.logger = logger
-
-	class logger_thunk:
-		def __init__ (self, message, logger):
-			self.message = message
-			self.logger = logger
-
-		def __call__ (self, host, ttl, answer):
-			if not answer:
-				answer = host
-			self.logger.log ('%s%s' % (answer, self.message))
-
-	def log (self, ip, message):
-		self.resolver.resolve_ptr (
-			ip,
-			self.logger_thunk (
-				message,
-				self.logger
-				)
-			)
-
+    """Feed (ip, message) combinations into this logger to get a
+    resolved hostname in front of the message.  The message will not
+    be logged until the PTR request finishes (or fails)."""
+    
+    def __init__ (self, resolver, logger):
+        self.resolver = resolver
+        self.logger = logger
+        
+    class logger_thunk:
+        def __init__ (self, message, logger):
+            self.message = message
+            self.logger = logger
+            
+        def __call__ (self, host, ttl, answer):
+            if not answer:
+                answer = host
+            self.logger.log ('%s%s' % (answer, self.message))
+            
+    def log (self, ip, message):
+        self.resolver.resolve_ptr (
+                ip,
+                self.logger_thunk (
+                        message,
+                        self.logger
+                        )
+                )
+        
 class unresolving_logger:
-	"Just in case you don't want to resolve"
-	def __init__ (self, logger):
-		self.logger = logger
-
-	def log (self, ip, message):
-		self.logger.log ('%s%s' % (ip, message))
-
-
+    "Just in case you don't want to resolve"
+    def __init__ (self, logger):
+        self.logger = logger
+        
+    def log (self, ip, message):
+        self.logger.log ('%s%s' % (ip, message))
+        
+        
 def strip_eol (line):
-	while line and line[-1] in '\r\n':
-		line = line[:-1]
-	return line
-
+    while line and line[-1] in '\r\n':
+        line = line[:-1]
+    return line
+    
 class tail_logger:
-	"Keep track of the last <size> log messages"
-	def __init__ (self, logger, size=500):
-		self.size = size
-		self.logger = logger
-		self.messages = []
-
-	def log (self, message):
-		self.messages.append (strip_eol (message))
-		if len (self.messages) > self.size:
-			del self.messages[0]
-		self.logger.log (message)
+    "Keep track of the last <size> log messages"
+    def __init__ (self, logger, size=500):
+        self.size = size
+        self.logger = logger
+        self.messages = []
+        
+    def log (self, message):
+        self.messages.append (strip_eol (message))
+        if len (self.messages) > self.size:
+            del self.messages[0]
+        self.logger.log (message)

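The loggers above are designed to stack: file_logger does the actual I/O,
and the wrappers (tail_logger, multi_logger, resolving_logger) each add one
behavior.  A short usage sketch, assuming the module is importable as
logger and using sys.stdout as the target stream:

    import sys
    import logger

    base  = logger.file_logger (sys.stdout)        # writes and flushes lines
    tail  = logger.tail_logger (base, size=100)    # remembers the last 100 messages
    multi = logger.multi_logger ([tail])           # fans out to a list of loggers

    multi.log ('GET / HTTP/1.0')
    # tail.messages[-1] is the EOL-stripped copy of the message;
    # the line itself went to stdout through the wrapped file_logger.
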
--- Updated File m_syslog.py in package Zope2 --
--- m_syslog.py	2001/04/25 19:07:32	1.10
+++ m_syslog.py	2001/05/01 11:44:48	1.11
@@ -98,80 +98,80 @@
 LOG_LOCAL7		= 23		#  reserved for local use 
 
 priority_names = {
-	"alert":	LOG_ALERT,
-	"crit":		LOG_CRIT,
-	"debug":	LOG_DEBUG,
-	"emerg":	LOG_EMERG,
-	"err":		LOG_ERR,
-	"error":	LOG_ERR,		#  DEPRECATED 
-	"info":		LOG_INFO,
-	"notice":	LOG_NOTICE,
-	"panic": 	LOG_EMERG,		#  DEPRECATED 
-	"warn":		LOG_WARNING,		#  DEPRECATED 
-	"warning":	LOG_WARNING,
-	}
+        "alert":	LOG_ALERT,
+        "crit":		LOG_CRIT,
+        "debug":	LOG_DEBUG,
+        "emerg":	LOG_EMERG,
+        "err":		LOG_ERR,
+        "error":	LOG_ERR,		#  DEPRECATED 
+        "info":		LOG_INFO,
+        "notice":	LOG_NOTICE,
+        "panic": 	LOG_EMERG,		#  DEPRECATED 
+        "warn":		LOG_WARNING,		#  DEPRECATED 
+        "warning":	LOG_WARNING,
+        }
 
 facility_names = {
-	"auth":		LOG_AUTH,
-	"authpriv":	LOG_AUTHPRIV,
-	"cron": 	LOG_CRON,
-	"daemon":	LOG_DAEMON,
-	"kern":		LOG_KERN,
-	"lpr":		LOG_LPR,
-	"mail":		LOG_MAIL,
-	"news":		LOG_NEWS,
-	"security":	LOG_AUTH,		#  DEPRECATED 
-	"syslog":	LOG_SYSLOG,
-	"user":		LOG_USER,
-	"uucp":		LOG_UUCP,
-	"local0":	LOG_LOCAL0,
-	"local1":	LOG_LOCAL1,
-	"local2":	LOG_LOCAL2,
-	"local3":	LOG_LOCAL3,
-	"local4":	LOG_LOCAL4,
-	"local5":	LOG_LOCAL5,
-	"local6":	LOG_LOCAL6,
-	"local7":	LOG_LOCAL7,
-	}
+        "auth":		LOG_AUTH,
+        "authpriv":	LOG_AUTHPRIV,
+        "cron": 	LOG_CRON,
+        "daemon":	LOG_DAEMON,
+        "kern":		LOG_KERN,
+        "lpr":		LOG_LPR,
+        "mail":		LOG_MAIL,
+        "news":		LOG_NEWS,
+        "security":	LOG_AUTH,		#  DEPRECATED 
+        "syslog":	LOG_SYSLOG,
+        "user":		LOG_USER,
+        "uucp":		LOG_UUCP,
+        "local0":	LOG_LOCAL0,
+        "local1":	LOG_LOCAL1,
+        "local2":	LOG_LOCAL2,
+        "local3":	LOG_LOCAL3,
+        "local4":	LOG_LOCAL4,
+        "local5":	LOG_LOCAL5,
+        "local6":	LOG_LOCAL6,
+        "local7":	LOG_LOCAL7,
+        }
 
 import socket
 
 class syslog_client:
-	def __init__ (self, address='/dev/log'):
-		self.address = address
-		if type (address) == type(''):
-			self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
-			self.socket.connect (address)
-			self.unix = 1
-		else:
-			self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
-			self.unix = 0
-
-	# curious: when talking to the unix-domain '/dev/log' socket, a
-	#   zero-terminator seems to be required.  this string is placed
-	#   into a class variable so that it can be overridden if
-	#   necessary.
-
-	log_format_string = '<%d>%s\000'
-
-	def log (self, message, facility=LOG_USER, priority=LOG_INFO):
-		message = self.log_format_string % (
-			self.encode_priority (facility, priority),
-			message
-			)
-		if self.unix:
-			self.socket.send (message)
-		else:
-			self.socket.sendto (message, self.address)
-
-	def encode_priority (self, facility, priority):
-		if type(facility) == type(''):
-			facility = facility_names[facility]
-		if type(priority) == type(''):
-			priority = priority_names[priority]			
-		return (facility<<3) | priority
-
-	def close (self):
-		if self.unix:
-			self.socket.close()
-
+    def __init__ (self, address='/dev/log'):
+        self.address = address
+        if type (address) == type(''):
+            self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
+            self.socket.connect (address)
+            self.unix = 1
+        else:
+            self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
+            self.unix = 0
+            
+    # curious: when talking to the unix-domain '/dev/log' socket, a
+    #   zero-terminator seems to be required.  this string is placed
+    #   into a class variable so that it can be overridden if
+    #   necessary.
+            
+    log_format_string = '<%d>%s\000'
+    
+    def log (self, message, facility=LOG_USER, priority=LOG_INFO):
+        message = self.log_format_string % (
+                self.encode_priority (facility, priority),
+                message
+                )
+        if self.unix:
+            self.socket.send (message)
+        else:
+            self.socket.sendto (message, self.address)
+            
+    def encode_priority (self, facility, priority):
+        if type(facility) == type(''):
+            facility = facility_names[facility]
+        if type(priority) == type(''):
+            priority = priority_names[priority]
+        return (facility<<3) | priority
+        
+    def close (self):
+        if self.unix:
+            self.socket.close()
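
[For reference, the <PRI> value encode_priority() computes is just the
facility packed into the high bits with the priority in the low three;
a standalone sketch of what goes on the wire:]

def encode_priority (facility, priority):
    return (facility << 3) | priority

pri = encode_priority (3, 3)            # LOG_DAEMON=3, LOG_ERR=3
line = '<%d>%s\000' % (pri, 'disk full')
# -> '<27>disk full\x00' (the trailing NUL is for the /dev/log socket)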
+            

--- Updated File monitor.py in package Zope2 --
--- monitor.py	2001/04/25 19:07:33	1.11
+++ monitor.py	2001/05/01 11:44:48	1.12
@@ -22,329 +22,329 @@
 import producers
 
 class monitor_channel (asynchat.async_chat):
-	try_linemode = 1
-
-	def __init__ (self, server, sock, addr):
-		asynchat.async_chat.__init__ (self, sock)
-		self.server = server
-		self.addr = addr
-		self.set_terminator ('\r\n')
-		self.data = ''
-		# local bindings specific to this channel
-		self.local_env = sys.modules['__main__'].__dict__.copy()
-		self.push ('Python ' + sys.version + '\r\n')
-		self.push (sys.copyright+'\r\n')
-		self.push ('Welcome to %s\r\n' % self)
-		self.push ("[Hint: try 'from __main__ import *']\r\n")
-		self.prompt()
-		self.number = server.total_sessions.as_long()
-		self.line_counter = counter()
-		self.multi_line = []
-		
-	def handle_connect (self):
-		# send IAC DO LINEMODE
-		self.push ('\377\375\"')
-
-	def close (self):
-		self.server.closed_sessions.increment()
-		asynchat.async_chat.close(self)
-
-	def prompt (self):
-		self.push ('>>> ')
-
-	def collect_incoming_data (self, data):
-		self.data = self.data + data
-		if len(self.data) > 1024:
-			# denial of service.
-			self.push ('BCNU\r\n')
-			self.close_when_done()
-
-	def found_terminator (self):
-		line = self.clean_line (self.data)
-		self.data = ''
-		self.line_counter.increment()
-		# check for special case inputs...
-		if not line and not self.multi_line:
-			self.prompt()
-			return
-		if line in ['\004', 'exit']:
-			self.push ('BCNU\r\n')
-			self.close_when_done()
-			return
-		oldout = sys.stdout
-		olderr = sys.stderr
-		try:
-			p = output_producer(self, olderr)
-			sys.stdout = p
-			sys.stderr = p
-			try:
-				# this is, of course, a blocking operation.
-				# if you wanted to thread this, you would have
-				# to synchronize, etc... and treat the output
-				# like a pipe.  Not Fun.
-				#
-				# try eval first.  If that fails, try exec.  If that fails,
-				# hurl.
-				try:
-					if self.multi_line:
-						# oh, this is horrible...
-						raise SyntaxError
-					co = compile (line, repr(self), 'eval')
-					result = eval (co, self.local_env)
-					method = 'eval'
-					if result is not None:
-						print repr(result)
-					self.local_env['_'] = result
-				except SyntaxError:
-					try:
-						if self.multi_line:
-							if line and line[0] in [' ','\t']:
-								self.multi_line.append (line)
-								self.push ('... ')
-								return
-							else:
-								self.multi_line.append (line)
-								line =	string.join (self.multi_line, '\n')
-								co = compile (line, repr(self), 'exec')
-								self.multi_line = []
-						else:
-							co = compile (line, repr(self), 'exec')
-					except SyntaxError, why:
-						if why[0] == 'unexpected EOF while parsing':
-							self.push ('... ')
-							self.multi_line.append (line)
-							return
-						else:
-							t,v,tb = sys.exc_info()
-							del tb
-							raise t,v
-					exec co in self.local_env
-					method = 'exec'
-			except:
-				method = 'exception'
-				self.multi_line = []
-				(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
-				self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
-		finally:
-			sys.stdout = oldout
-			sys.stderr = olderr
-		self.log_info('%s:%s (%s)> %s' % (
-			self.number,
-			self.line_counter,
-			method,
-			repr(line))
-			)
-		self.push_with_producer (p)
-		self.prompt()
-		
-	# for now, we ignore any telnet option stuff sent to
-	# us, and we process the backspace key ourselves.
-	# gee, it would be fun to write a full-blown line-editing
-	# environment, etc...
-	def clean_line (self, line):
-		chars = []
-		for ch in line:
-			oc = ord(ch)
-			if oc < 127:
-				if oc in [8,177]:
-					# backspace
-					chars = chars[:-1]
-				else:
-					chars.append (ch)
-		return string.join (chars, '')
-
+    try_linemode = 1
+    
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        # local bindings specific to this channel
+        self.local_env = sys.modules['__main__'].__dict__.copy()
+        self.push ('Python ' + sys.version + '\r\n')
+        self.push (sys.copyright+'\r\n')
+        self.push ('Welcome to %s\r\n' % self)
+        self.push ("[Hint: try 'from __main__ import *']\r\n")
+        self.prompt()
+        self.number = server.total_sessions.as_long()
+        self.line_counter = counter()
+        self.multi_line = []
+        
+    def handle_connect (self):
+        # send IAC DO LINEMODE
+        self.push ('\377\375\"')
+        
+    def close (self):
+        self.server.closed_sessions.increment()
+        asynchat.async_chat.close(self)
+        
+    def prompt (self):
+        self.push ('>>> ')
+        
+    def collect_incoming_data (self, data):
+        self.data = self.data + data
+        if len(self.data) > 1024:
+            # denial of service.
+            self.push ('BCNU\r\n')
+            self.close_when_done()
+            
+    def found_terminator (self):
+        line = self.clean_line (self.data)
+        self.data = ''
+        self.line_counter.increment()
+        # check for special case inputs...
+        if not line and not self.multi_line:
+            self.prompt()
+            return
+        if line in ['\004', 'exit']:
+            self.push ('BCNU\r\n')
+            self.close_when_done()
+            return
+        oldout = sys.stdout
+        olderr = sys.stderr
+        try:
+            p = output_producer(self, olderr)
+            sys.stdout = p
+            sys.stderr = p
+            try:
+                # this is, of course, a blocking operation.
+                # if you wanted to thread this, you would have
+                # to synchronize, etc... and treat the output
+                # like a pipe.  Not Fun.
+                #
+                # try eval first.  If that fails, try exec.  If that fails,
+                # hurl.
+                try:
+                    if self.multi_line:
+                        # oh, this is horrible...
+                        raise SyntaxError
+                    co = compile (line, repr(self), 'eval')
+                    result = eval (co, self.local_env)
+                    method = 'eval'
+                    if result is not None:
+                        print repr(result)
+                    self.local_env['_'] = result
+                except SyntaxError:
+                    try:
+                        if self.multi_line:
+                            if line and line[0] in [' ','\t']:
+                                self.multi_line.append (line)
+                                self.push ('... ')
+                                return
+                            else:
+                                self.multi_line.append (line)
+                                line = string.join (self.multi_line, '\n')
+                                co = compile (line, repr(self), 'exec')
+                                self.multi_line = []
+                        else:
+                            co = compile (line, repr(self), 'exec')
+                    except SyntaxError, why:
+                        if why[0] == 'unexpected EOF while parsing':
+                            self.push ('... ')
+                            self.multi_line.append (line)
+                            return
+                        else:
+                            t,v,tb = sys.exc_info()
+                            del tb
+                            raise t,v
+                    exec co in self.local_env
+                    method = 'exec'
+            except:
+                method = 'exception'
+                self.multi_line = []
+                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
+                self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
+        finally:
+            sys.stdout = oldout
+            sys.stderr = olderr
+        self.log_info('%s:%s (%s)> %s' % (
+                self.number,
+                self.line_counter,
+                method,
+                repr(line))
+                )
+        self.push_with_producer (p)
+        self.prompt()
+        
+    # for now, we ignore any telnet option stuff sent to
+    # us, and we process the backspace key ourselves.
+    # gee, it would be fun to write a full-blown line-editing
+    # environment, etc...
+    def clean_line (self, line):
+        chars = []
+        for ch in line:
+            oc = ord(ch)
+            if oc < 127:
+                if oc in [8,177]:
+                    # backspace
+                    chars = chars[:-1]
+                else:
+                    chars.append (ch)
+        return string.join (chars, '')
+        
 class monitor_server (asyncore.dispatcher):
-
-	SERVER_IDENT = 'Monitor Server (V%s)' % VERSION
-
-	channel_class = monitor_channel
 
-	def __init__ (self, hostname='127.0.0.1', port=8023):
-		self.hostname = hostname
-		self.port = port
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-		self.set_reuse_addr()
-		self.bind ((hostname, port))
-		self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
-		self.listen (5)
-		self.closed		= 0
-		self.failed_auths = 0
-		self.total_sessions = counter()
-		self.closed_sessions = counter()
-
-	def writable (self):
-		return 0
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		self.log_info('Incoming monitor connection from %s:%d' % addr)
-		self.channel_class (self, conn, addr)
-		self.total_sessions.increment()
-
-	def status (self):
-		return producers.simple_producer (
-			'<h2>%s</h2>'						% self.SERVER_IDENT
-			+ '<br><b>Total Sessions:</b> %s'		% self.total_sessions
-			+ '<br><b>Current Sessions:</b> %d'	% (
-				self.total_sessions.as_long()-self.closed_sessions.as_long()
-				)
-			)
-
+    SERVER_IDENT = 'Monitor Server (V%s)' % VERSION
+    
+    channel_class = monitor_channel
+    
+    def __init__ (self, hostname='127.0.0.1', port=8023):
+        self.hostname = hostname
+        self.port = port
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind ((hostname, port))
+        self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
+        self.listen (5)
+        self.closed = 0
+        self.failed_auths = 0
+        self.total_sessions = counter()
+        self.closed_sessions = counter()
+        
+    def writable (self):
+        return 0
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        self.log_info('Incoming monitor connection from %s:%d' % addr)
+        self.channel_class (self, conn, addr)
+        self.total_sessions.increment()
+        
+    def status (self):
+        return producers.simple_producer (
+                '<h2>%s</h2>'                       % self.SERVER_IDENT
+                + '<br><b>Total Sessions:</b> %s'   % self.total_sessions
+                + '<br><b>Current Sessions:</b> %d' % (
+                        self.total_sessions.as_long()-self.closed_sessions.as_long()
+                        )
+                )
+        
 def hex_digest (s):
-	m = md5.md5()
-	m.update (s)
-	return string.joinfields (
-		map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
-		'',
-		)
-
+    m = md5.md5()
+    m.update (s)
+    return string.joinfields (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
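
[hex_digest() above hand-rolls a hex dump of the MD5 digest.  Note that
hex(ord(x))[2:] drops leading zeros, so the result is *not* the same as a
standard hexdigest; a faithful port to modern Python (hashlib assumed,
s taken as bytes) would be:]

import hashlib

def hex_digest (s):
    return ''.join ('%x' % b for b in hashlib.md5 (s).digest ())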
+    
 class secure_monitor_channel (monitor_channel):
-	authorized = 0
-	
-	def __init__ (self, server, sock, addr):
-		asynchat.async_chat.__init__ (self, sock)
-		self.server = server
-		self.addr = addr
-		self.set_terminator ('\r\n')
-		self.data = ''
-		# local bindings specific to this channel
-		self.local_env = {}
-		# send timestamp string
-		self.timestamp = str(time.time())
-		self.count = 0
-		self.line_counter = counter()
-		self.number = int(server.total_sessions.as_long())
-		self.multi_line = []
-		self.push (self.timestamp + '\r\n')
-
-	def found_terminator (self):
-		if not self.authorized:
-			if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
-				self.log_info ('%s: failed authorization' % self, 'warning')
-				self.server.failed_auths = self.server.failed_auths + 1
-				self.close()
-			else:
-				self.authorized = 1
-				self.push ('Python ' + sys.version + '\r\n')
-				self.push (sys.copyright+'\r\n')
-				self.push ('Welcome to %s\r\n' % self)
-				self.prompt()
-				self.data = ''
-		else:
-			monitor_channel.found_terminator (self)
-		
+    authorized = 0
+    
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        # local bindings specific to this channel
+        self.local_env = {}
+        # send timestamp string
+        self.timestamp = str(time.time())
+        self.count = 0
+        self.line_counter = counter()
+        self.number = int(server.total_sessions.as_long())
+        self.multi_line = []
+        self.push (self.timestamp + '\r\n')
+        
+    def found_terminator (self):
+        if not self.authorized:
+            if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
+                self.log_info ('%s: failed authorization' % self, 'warning')
+                self.server.failed_auths = self.server.failed_auths + 1
+                self.close()
+            else:
+                self.authorized = 1
+                self.push ('Python ' + sys.version + '\r\n')
+                self.push (sys.copyright+'\r\n')
+                self.push ('Welcome to %s\r\n' % self)
+                self.prompt()
+                self.data = ''
+        else:
+            monitor_channel.found_terminator (self)
+            
 class secure_encrypted_monitor_channel (secure_monitor_channel):
-	"Wrap send() and recv() with a stream cipher"
-
-	def __init__ (self, server, conn, addr):
-		key = server.password
-		self.outgoing = server.cipher.new (key)
-		self.incoming = server.cipher.new (key)
-		secure_monitor_channel.__init__ (self, server, conn, addr)
-
-	def send (self, data):
-		# send the encrypted data instead
-		ed = self.outgoing.encrypt (data)
-		return secure_monitor_channel.send (self, ed)
-
-	def recv (self, block_size):
-		data = secure_monitor_channel.recv (self, block_size)
-		if data:
-			dd = self.incoming.decrypt (data)
-			return dd
-		else:
-			return data
-
+    "Wrap send() and recv() with a stream cipher"
+    
+    def __init__ (self, server, conn, addr):
+        key = server.password
+        self.outgoing = server.cipher.new (key)
+        self.incoming = server.cipher.new (key)
+        secure_monitor_channel.__init__ (self, server, conn, addr)
+        
+    def send (self, data):
+        # send the encrypted data instead
+        ed = self.outgoing.encrypt (data)
+        return secure_monitor_channel.send (self, ed)
+        
+    def recv (self, block_size):
+        data = secure_monitor_channel.recv (self, block_size)
+        if data:
+            dd = self.incoming.decrypt (data)
+            return dd
+        else:
+            return data
+            
 class secure_monitor_server (monitor_server):
-	channel_class = secure_monitor_channel
-
-	def __init__ (self, password, hostname='', port=8023):
-		monitor_server.__init__ (self, hostname, port)
-		self.password = password
-
-	def status (self):
-		p = monitor_server.status (self)
-		# kludge
-		p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
-		return p
-
-# don't try to print from within any of the methods
-# of this object. 8^)
-
+    channel_class = secure_monitor_channel
+    
+    def __init__ (self, password, hostname='', port=8023):
+        monitor_server.__init__ (self, hostname, port)
+        self.password = password
+        
+    def status (self):
+        p = monitor_server.status (self)
+        # kludge
+        p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
+        return p
+        
+# don't try to print from within any of the methods
+# of this object. 8^)
+        
 class output_producer:
-	def __init__ (self, channel, real_stderr):
-		self.channel = channel
-		self.data = ''
-		# use _this_ for debug output
-		self.stderr = real_stderr
-
-	def check_data (self):
-		if len(self.data) > 1<<16:
-			# runaway output, close it.
-			self.channel.close()
-			
-	def write (self, data):
-		lines = string.splitfields (data, '\n')
-		data = string.join (lines, '\r\n')
-		self.data = self.data + data
-		self.check_data()
-		
-	def writeline (self, line):
-		self.data = self.data + line + '\r\n'
-		self.check_data()
-		
-	def writelines (self, lines):
-		self.data = self.data + string.joinfields (
-			lines,
-			'\r\n'
-			) + '\r\n'
-		self.check_data()
-
-	def ready (self):
-		return (len (self.data) > 0)
-
-	def flush (self):
-		pass
-
-	def softspace (self, *args):
-		pass
-
-	def more (self):
-		if self.data:
-			result = self.data[:512]
-			self.data = self.data[512:]
-			return result
-		else:
-			return ''
-
+    def __init__ (self, channel, real_stderr):
+        self.channel = channel
+        self.data = ''
+        # use _this_ for debug output
+        self.stderr = real_stderr
+        
+    def check_data (self):
+        if len(self.data) > 1<<16:
+            # runaway output, close it.
+            self.channel.close()
+            
+    def write (self, data):
+        lines = string.splitfields (data, '\n')
+        data = string.join (lines, '\r\n')
+        self.data = self.data + data
+        self.check_data()
+        
+    def writeline (self, line):
+        self.data = self.data + line + '\r\n'
+        self.check_data()
+        
+    def writelines (self, lines):
+        self.data = self.data + string.joinfields (
+                lines,
+                '\r\n'
+                ) + '\r\n'
+        self.check_data()
+        
+    def ready (self):
+        return (len (self.data) > 0)
+        
+    def flush (self):
+        pass
+        
+    def softspace (self, *args):
+        pass
+        
+    def more (self):
+        if self.data:
+            result = self.data[:512]
+            self.data = self.data[512:]
+            return result
+        else:
+            return ''
+            
 if __name__ == '__main__':
-	import string
-	import sys
-	if '-s' in sys.argv:
-		sys.argv.remove ('-s')
-		print 'Enter password: ',
-		password = raw_input()
-	else:
-		password = None
-
-	if '-e' in sys.argv:
-		sys.argv.remove ('-e')
-		encrypt = 1
-	else:
-		encrypt = 0
-
-	if len(sys.argv) > 1:
-		port = string.atoi (sys.argv[1])
-	else:
-		port = 8023
-
-	if password is not None:
-		s = secure_monitor_server (password, '', port)
-		if encrypt:
-			s.channel_class = secure_encrypted_monitor_channel
-			import sapphire
-			s.cipher = sapphire
-	else:
-		s = monitor_server ('', port)
-
-	asyncore.loop(use_poll=1)
+    import string
+    import sys
+    if '-s' in sys.argv:
+        sys.argv.remove ('-s')
+        print 'Enter password: ',
+        password = raw_input()
+    else:
+        password = None
+        
+    if '-e' in sys.argv:
+        sys.argv.remove ('-e')
+        encrypt = 1
+    else:
+        encrypt = 0
+        
+    if len(sys.argv) > 1:
+        port = string.atoi (sys.argv[1])
+    else:
+        port = 8023
+        
+    if password is not None:
+        s = secure_monitor_server (password, '', port)
+        if encrypt:
+            s.channel_class = secure_encrypted_monitor_channel
+            import sapphire
+            s.cipher = sapphire
+    else:
+        s = monitor_server ('', port)
+        
+    asyncore.loop(use_poll=1)
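
[The password handshake secure_monitor_channel implements, end to end:
the server pushes a timestamp as a challenge, the client answers with
hex_digest(timestamp + password), and the server compares.  A sketch,
with hex_digest redefined locally so the snippet stands alone and
'sekrit' a made-up secret:]

import hashlib, time

def hex_digest (s):                     # mirrors the helper above
    return ''.join ('%x' % b for b in hashlib.md5 (s.encode ()).digest ())

password  = 'sekrit'
timestamp = str (time.time ())                    # server -> client
response  = hex_digest (timestamp + password)     # client -> server
assert response == hex_digest (timestamp + password)   # server's check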

--- Updated File monitor_client.py in package Zope2 --
--- monitor_client.py	2001/04/25 19:07:33	1.7
+++ monitor_client.py	2001/05/01 11:44:48	1.8
@@ -14,113 +14,113 @@
 import time
 
 class stdin_channel (asyncore.file_dispatcher):
-	def handle_read (self):
-		data = self.recv(512)
-		if not data:
-			print '\nclosed.'
-			self.sock_channel.close()
-			try:
-				self.close()
-			except:
-				pass
-			
-		data = regsub.gsub ('\n', '\r\n', data)
-		self.sock_channel.push (data)
-
-	def writable (self):
-		return 0
-
-	def log (self, *ignore):
-		pass
-		
+    def handle_read (self):
+        data = self.recv(512)
+        if not data:
+            print '\nclosed.'
+            self.sock_channel.close()
+            try:
+                self.close()
+            except:
+                pass
+            return    # stdin is gone; nothing left to push
+                
+        data = regsub.gsub ('\n', '\r\n', data)
+        self.sock_channel.push (data)
+        
+    def writable (self):
+        return 0
+        
+    def log (self, *ignore):
+        pass
+        
 class monitor_client (asynchat.async_chat):
-	def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
-		asynchat.async_chat.__init__ (self)
-		self.create_socket (socket_type, socket.SOCK_STREAM)
-		self.terminator = '\r\n'
-		self.connect (addr)
-		self.sent_auth = 0
-		self.timestamp = ''
-		self.password = password
-
-	def collect_incoming_data (self, data):
-		if not self.sent_auth:
-			self.timestamp = self.timestamp + data
-		else:
-			sys.stdout.write (data)
-			sys.stdout.flush()
-
-	def found_terminator (self):
-		if not self.sent_auth:
-			self.push (hex_digest (self.timestamp + self.password) + '\r\n')
-			self.sent_auth = 1
-		else:
-			print
-
-	def handle_close (self):
-		# close all the channels, which will make the standard main
-		# loop exit.
-		map (lambda x: x.close(), asyncore.socket_map.values())
-
-	def log (self, *ignore):
-		pass
-
+    def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
+        asynchat.async_chat.__init__ (self)
+        self.create_socket (socket_type, socket.SOCK_STREAM)
+        self.terminator = '\r\n'
+        self.connect (addr)
+        self.sent_auth = 0
+        self.timestamp = ''
+        self.password = password
+        
+    def collect_incoming_data (self, data):
+        if not self.sent_auth:
+            self.timestamp = self.timestamp + data
+        else:
+            sys.stdout.write (data)
+            sys.stdout.flush()
+            
+    def found_terminator (self):
+        if not self.sent_auth:
+            self.push (hex_digest (self.timestamp + self.password) + '\r\n')
+            self.sent_auth = 1
+        else:
+            print
+            
+    def handle_close (self):
+        # close all the channels, which will make the standard main
+        # loop exit.
+        map (lambda x: x.close(), asyncore.socket_map.values())
+        
+    def log (self, *ignore):
+        pass
+        
 class encrypted_monitor_client (monitor_client):
-	"Wrap push() and recv() with a stream cipher"
-
-	def init_cipher (self, cipher, key):
-		self.outgoing = cipher.new (key)
-		self.incoming = cipher.new (key)
-
-	def push (self, data):
-		# push the encrypted data instead
-		return monitor_client.push (self, self.outgoing.encrypt (data))
-
-	def recv (self, block_size):
-		data = monitor_client.recv (self, block_size)
-		if data:
-			return self.incoming.decrypt (data)
-		else:
-			return data
-
+    "Wrap push() and recv() with a stream cipher"
+    
+    def init_cipher (self, cipher, key):
+        self.outgoing = cipher.new (key)
+        self.incoming = cipher.new (key)
+        
+    def push (self, data):
+        # push the encrypted data instead
+        return monitor_client.push (self, self.outgoing.encrypt (data))
+        
+    def recv (self, block_size):
+        data = monitor_client.recv (self, block_size)
+        if data:
+            return self.incoming.decrypt (data)
+        else:
+            return data
+            
 def hex_digest (s):
-	m = md5.md5()
-	m.update (s)
-	return string.join (
-		map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
-		'',
-		)
-
+    m = md5.md5()
+    m.update (s)
+    return string.join (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
+    
 if __name__ == '__main__':
-	if len(sys.argv) == 1:
-		print 'Usage: %s host port' % sys.argv[0]
-		sys.exit(0)
-
-	if ('-e' in sys.argv):
-		encrypt = 1
-		sys.argv.remove ('-e')
-	else:
-		encrypt = 0
-
-	sys.stderr.write ('Enter Password: ')
-	sys.stderr.flush()
-	import os
-	try:
-		os.system ('stty -echo')
-		p = raw_input()
-		print
-	finally:
-		os.system ('stty echo')
-	stdin = stdin_channel (0)
-	if len(sys.argv) > 1:
-		if encrypt:
-			client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
-			import sapphire
-			client.init_cipher (sapphire, p)
-		else:
-			client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
-	else:
-		# default to local host, 'standard' port
-		client = monitor_client (p)
-	stdin.sock_channel = client
-	asyncore.loop()
+    if len(sys.argv) == 1:
+        print 'Usage: %s host port' % sys.argv[0]
+        sys.exit(0)
+        
+    if ('-e' in sys.argv):
+        encrypt = 1
+        sys.argv.remove ('-e')
+    else:
+        encrypt = 0
+        
+    sys.stderr.write ('Enter Password: ')
+    sys.stderr.flush()
+    import os
+    try:
+        os.system ('stty -echo')
+        p = raw_input()
+        print
+    finally:
+        os.system ('stty echo')
+    stdin = stdin_channel (0)
+    if len(sys.argv) > 1:
+        if encrypt:
+            client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
+            import sapphire
+            client.init_cipher (sapphire, p)
+        else:
+            client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
+    else:
+        # default to local host, 'standard' port
+        client = monitor_client (p)
+    stdin.sock_channel = client
+    asyncore.loop()
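
[The encrypted client only assumes a cipher exposing new(key) -> object
with encrypt(data)/decrypt(data), one keystream per direction.  A toy --
and deliberately insecure -- XOR cipher matching that interface, for
illustration only (sapphire itself is not shown here):]

class toy_cipher:
    def __init__ (self, key):
        self.key = key
        self.pos = 0
    def crypt (self, data):             # XOR keystream; symmetric
        out = []
        for ch in data:
            out.append (chr (ord (ch) ^ ord (self.key[self.pos % len (self.key)])))
            self.pos = self.pos + 1
        return ''.join (out)
    encrypt = crypt
    decrypt = crypt

outgoing = toy_cipher ('sekrit')        # one keystream per direction,
incoming = toy_cipher ('sekrit')        # just like the channel above
wire = outgoing.encrypt ('>>> ')
assert incoming.decrypt (wire) == '>>> '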

--- Updated File monitor_client_win32.py in package Zope2 --
--- monitor_client_win32.py	2001/04/25 19:07:33	1.6
+++ monitor_client_win32.py	2001/05/01 11:44:48	1.7
@@ -13,41 +13,41 @@
 import md5
 
 def hex_digest (s):
-	m = md5.md5()
-	m.update (s)
-	return string.join (
-		map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
-		'',
-		)
-
+    m = md5.md5()
+    m.update (s)
+    return string.join (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
+    
 def reader (lock, sock, password):
-	# first grab the timestamp
-	ts = sock.recv (1024)[:-2]
-	sock.send (hex_digest (ts+password) + '\r\n')
-	while 1:
-		d = sock.recv (1024)
-		if not d:
-			lock.release()
-			print 'Connection closed.  Hit <return> to exit'
-			thread.exit()
-		sys.stdout.write (d)
-		sys.stdout.flush()
-
+    # first grab the timestamp
+    ts = sock.recv (1024)[:-2]
+    sock.send (hex_digest (ts+password) + '\r\n')
+    while 1:
+        d = sock.recv (1024)
+        if not d:
+            lock.release()
+            print 'Connection closed.  Hit <return> to exit'
+            thread.exit()
+        sys.stdout.write (d)
+        sys.stdout.flush()
+        
 def writer (lock, sock, barrel="just kidding"):
-	while lock.locked():
-		sock.send (
-			sys.stdin.readline()[:-1] + '\r\n'
-			)
-
+    while lock.locked():
+        sock.send (
+                sys.stdin.readline()[:-1] + '\r\n'
+                )
+        
 if __name__ == '__main__':
-	if len(sys.argv) == 1:
-		print 'Usage: %s host port'
-		sys.exit(0)
-	print 'Enter Password: ',
-	p = raw_input()
-	s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
-	s.connect ((sys.argv[1], string.atoi(sys.argv[2])))
-	l = thread.allocate_lock()
-	l.acquire()
-	thread.start_new_thread (reader, (l, s, p))
-	writer (l, s)
+    if len(sys.argv) == 1:
+        print 'Usage: %s host port' % sys.argv[0]
+        sys.exit(0)
+    print 'Enter Password: ',
+    p = raw_input()
+    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
+    s.connect ((sys.argv[1], string.atoi(sys.argv[2])))
+    l = thread.allocate_lock()
+    l.acquire()
+    thread.start_new_thread (reader, (l, s, p))
+    writer (l, s)
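
[The lock here doubles as a liveness flag: the main thread acquires it
while the session is up, reader() releases it on EOF, and writer() spins
on lock.locked().  The same idiom with the modern threading module:]

import threading, time

alive = threading.Lock ()
alive.acquire ()                        # session is up

def reader ():
    time.sleep (0.1)                    # pretend the peer hung up
    alive.release ()                    # tell the writer to stop

threading.Thread (target=reader).start ()
while alive.locked ():                  # writer keeps going until release
    time.sleep (0.01)
print ('Connection closed.')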

--- Updated File producers.py in package Zope2 --
--- producers.py	2001/04/25 19:07:33	1.9
+++ producers.py	2001/05/01 11:44:48	1.10
@@ -14,318 +14,318 @@
 """
 
 class simple_producer:
-	"producer for a string"
-	def __init__ (self, data, buffer_size=1024):
-		self.data = data
-		self.buffer_size = buffer_size
-
-	def more (self):
-		if len (self.data) > self.buffer_size:
-			result = self.data[:self.buffer_size]
-			self.data = self.data[self.buffer_size:]
-			return result
-		else:
-			result = self.data
-			self.data = ''
-			return result
-
+    "producer for a string"
+    def __init__ (self, data, buffer_size=1024):
+        self.data = data
+        self.buffer_size = buffer_size
+        
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+            
 class scanning_producer:
-	"like simple_producer, but more efficient for large strings"
-	def __init__ (self, data, buffer_size=1024):
-		self.data = data
-		self.buffer_size = buffer_size
-		self.pos = 0
-
-	def more (self):
-		if self.pos < len(self.data):
-			lp = self.pos
-			rp = min (
-				len(self.data),
-				self.pos + self.buffer_size
-				)
-			result = self.data[lp:rp]
-			self.pos = self.pos + len(result)
-			return result
-		else:
-			return ''
-
+    "like simple_producer, but more efficient for large strings"
+    def __init__ (self, data, buffer_size=1024):
+        self.data = data
+        self.buffer_size = buffer_size
+        self.pos = 0
+        
+    def more (self):
+        if self.pos < len(self.data):
+            lp = self.pos
+            rp = min (
+                    len(self.data),
+                    self.pos + self.buffer_size
+                    )
+            result = self.data[lp:rp]
+            self.pos = self.pos + len(result)
+            return result
+        else:
+            return ''
+            
 class lines_producer:
-	"producer for a list of lines"
-
-	def __init__ (self, lines):
-		self.lines = lines
-
-	def ready (self):
-		return len(self.lines)
-
-	def more (self):
-		if self.lines:
-			chunk = self.lines[:50]
-			self.lines = self.lines[50:]
-			return string.join (chunk, '\r\n') + '\r\n'
-		else:
-			return ''
-
+    "producer for a list of lines"
+    
+    def __init__ (self, lines):
+        self.lines = lines
+        
+    def ready (self):
+        return len(self.lines)
+        
+    def more (self):
+        if self.lines:
+            chunk = self.lines[:50]
+            self.lines = self.lines[50:]
+            return string.join (chunk, '\r\n') + '\r\n'
+        else:
+            return ''
+            
 class buffer_list_producer:
-	"producer for a list of buffers"
-
-	# i.e., data == string.join (buffers, '')
-	
-	def __init__ (self, buffers):
-
-		self.index = 0
-		self.buffers = buffers
-
-	def more (self):
-		if self.index >= len(self.buffers):
-			return ''
-		else:
-			data = self.buffers[self.index]
-			self.index = self.index + 1
-			return data
-
+    "producer for a list of buffers"
+    
+    # i.e., data == string.join (buffers, '')
+    
+    def __init__ (self, buffers):
+    
+        self.index = 0
+        self.buffers = buffers
+        
+    def more (self):
+        if self.index >= len(self.buffers):
+            return ''
+        else:
+            data = self.buffers[self.index]
+            self.index = self.index + 1
+            return data
+            
 class file_producer:
-	"producer wrapper for file[-like] objects"
-
-	# match http_channel's outgoing buffer size
-	out_buffer_size = 1<<16
-
-	def __init__ (self, file):
-		self.done = 0
-		self.file = file
-
-	def more (self):
-		if self.done:
-			return ''
-		else:
-			data = self.file.read (self.out_buffer_size)
-			if not data:
-				self.file.close()
-				del self.file
-				self.done = 1
-				return ''
-			else:
-				return data
-
-# A simple output producer.  This one does not [yet] have
-# the safety feature builtin to the monitor channel:  runaway
-# output will not be caught.
-
-# don't try to print from within any of the methods
-# of this object.
-
+    "producer wrapper for file[-like] objects"
+    
+    # match http_channel's outgoing buffer size
+    out_buffer_size = 1<<16
+    
+    def __init__ (self, file):
+        self.done = 0
+        self.file = file
+        
+    def more (self):
+        if self.done:
+            return ''
+        else:
+            data = self.file.read (self.out_buffer_size)
+            if not data:
+                self.file.close()
+                del self.file
+                self.done = 1
+                return ''
+            else:
+                return data
+                
+# A simple output producer.  This one does not [yet] have
+# the safety feature builtin to the monitor channel:  runaway
+# output will not be caught.
+
+# don't try to print from within any of the methods
+# of this object.
+                
 class output_producer:
-	"Acts like an output file; suitable for capturing sys.stdout"
-	def __init__ (self):
-		self.data = ''
-			
-	def write (self, data):
-		lines = string.splitfields (data, '\n')
-		data = string.join (lines, '\r\n')
-		self.data = self.data + data
-		
-	def writeline (self, line):
-		self.data = self.data + line + '\r\n'
-		
-	def writelines (self, lines):
-		self.data = self.data + string.joinfields (
-			lines,
-			'\r\n'
-			) + '\r\n'
-
-	def ready (self):
-		return (len (self.data) > 0)
-
-	def flush (self):
-		pass
-
-	def softspace (self, *args):
-		pass
-
-	def more (self):
-		if self.data:
-			result = self.data[:512]
-			self.data = self.data[512:]
-			return result
-		else:
-			return ''
-
+    "Acts like an output file; suitable for capturing sys.stdout"
+    def __init__ (self):
+        self.data = ''
+        
+    def write (self, data):
+        lines = string.splitfields (data, '\n')
+        data = string.join (lines, '\r\n')
+        self.data = self.data + data
+        
+    def writeline (self, line):
+        self.data = self.data + line + '\r\n'
+        
+    def writelines (self, lines):
+        self.data = self.data + string.joinfields (
+                lines,
+                '\r\n'
+                ) + '\r\n'
+        
+    def ready (self):
+        return (len (self.data) > 0)
+        
+    def flush (self):
+        pass
+        
+    def softspace (self, *args):
+        pass
+        
+    def more (self):
+        if self.data:
+            result = self.data[:512]
+            self.data = self.data[512:]
+            return result
+        else:
+            return ''
+            
 class composite_producer:
-	"combine a fifo of producers into one"
-	def __init__ (self, producers):
-		self.producers = producers
-
-	def more (self):
-		while len(self.producers):
-			p = self.producers.first()
-			d = p.more()
-			if d:
-				return d
-			else:
-				self.producers.pop()
-		else:
-			return ''
-
-
+    "combine a fifo of producers into one"
+    def __init__ (self, producers):
+        self.producers = producers
+        
+    def more (self):
+        while len(self.producers):
+            p = self.producers.first()
+            d = p.more()
+            if d:
+                return d
+            else:
+                self.producers.pop()
+        else:
+            return ''
+            
+            
 class globbing_producer:
-	"""
-	'glob' the output from a producer into a particular buffer size.
-	helps reduce the number of calls to send().  [this appears to
-	gain about 30% performance on requests to a single channel]
-	"""
-
-	def __init__ (self, producer, buffer_size=1<<16):
-		self.producer = producer
-		self.buffer = ''
-		self.buffer_size = buffer_size
-
-	def more (self):
-		while len(self.buffer) < self.buffer_size:
-			data = self.producer.more()
-			if data:
-				self.buffer = self.buffer + data
-			else:
-				break
-		r = self.buffer
-		self.buffer = ''
-		return r
-
-
+    """
+    'glob' the output from a producer into a particular buffer size.
+    helps reduce the number of calls to send().  [this appears to
+    gain about 30% performance on requests to a single channel]
+    """
+    
+    def __init__ (self, producer, buffer_size=1<<16):
+        self.producer = producer
+        self.buffer = ''
+        self.buffer_size = buffer_size
+        
+    def more (self):
+        while len(self.buffer) < self.buffer_size:
+            data = self.producer.more()
+            if data:
+                self.buffer = self.buffer + data
+            else:
+                break
+        r = self.buffer
+        self.buffer = ''
+        return r
+        
+        
 class hooked_producer:
-	"""
-	A producer that will call <function> when it empties,.
-	with an argument of the number of bytes produced.  Useful
-	for logging/instrumentation purposes.
-	"""
-
-	def __init__ (self, producer, function):
-		self.producer = producer
-		self.function = function
-		self.bytes = 0
-
-	def more (self):
-		if self.producer:
-			result = self.producer.more()
-			if not result:
-				self.producer = None
-				self.function (self.bytes)
-			else:
-				self.bytes = self.bytes + len(result)
-			return result
-		else:
-			return ''
-
-# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
-# correct.  In the face of Strange Files, it is conceivable that
-# reading a 'file' may produce an amount of data not matching that
-# reported by os.stat() [text/binary mode issues, perhaps the file is
-# being appended to, etc..]  This makes the chunked encoding a True
-# Blessing, and it really ought to be used even with normal files.
-# How beautifully it blends with the concept of the producer.
-
+    """
+    A producer that will call <function> when it empties,
+    with an argument of the number of bytes produced.  Useful
+    for logging/instrumentation purposes.
+    """
+    
+    def __init__ (self, producer, function):
+        self.producer = producer
+        self.function = function
+        self.bytes = 0
+        
+    def more (self):
+        if self.producer:
+            result = self.producer.more()
+            if not result:
+                self.producer = None
+                self.function (self.bytes)
+            else:
+                self.bytes = self.bytes + len(result)
+            return result
+        else:
+            return ''
+            
+# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
+# correct.  In the face of Strange Files, it is conceivable that
+# reading a 'file' may produce an amount of data not matching that
+# reported by os.stat() [text/binary mode issues, perhaps the file is
+# being appended to, etc..]  This makes the chunked encoding a True
+# Blessing, and it really ought to be used even with normal files.
+# How beautifully it blends with the concept of the producer.
+            
 class chunked_producer:
-	"""A producer that implements the 'chunked' transfer coding for HTTP/1.1.
-	Here is a sample usage:
-		request['Transfer-Encoding'] = 'chunked'
-		request.push (
-			producers.chunked_producer (your_producer)
-			)
-		request.done()
-	"""
-
-	def __init__ (self, producer, footers=None):
-		self.producer = producer
-		self.footers = footers
-
-	def more (self):
-		if self.producer:
-			data = self.producer.more()
-			if data:
-				return '%x\r\n%s\r\n' % (len(data), data)
-			else:
-				self.producer = None
-				if self.footers:
-					return string.join (
-						['0'] + self.footers,
-						'\r\n'
-						) + '\r\n\r\n'
-				else:
-					return '0\r\n\r\n'
-		else:
-			return ''
-
-# Unfortunately this isn't very useful right now (Aug 97), because
-# apparently the browsers don't do on-the-fly decompression.  Which
-# is sad, because this could _really_ speed things up, especially for
-# low-bandwidth clients (i.e., most everyone).
-
+    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
+    Here is a sample usage:
+            request['Transfer-Encoding'] = 'chunked'
+            request.push (
+                    producers.chunked_producer (your_producer)
+                    )
+            request.done()
+    """
+    
+    def __init__ (self, producer, footers=None):
+        self.producer = producer
+        self.footers = footers
+        
+    def more (self):
+        if self.producer:
+            data = self.producer.more()
+            if data:
+                return '%x\r\n%s\r\n' % (len(data), data)
+            else:
+                self.producer = None
+                if self.footers:
+                    return string.join (
+                            ['0'] + self.footers,
+                            '\r\n'
+                            ) + '\r\n\r\n'
+                else:
+                    return '0\r\n\r\n'
+        else:
+            return ''
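
[What chunked_producer actually puts on the wire: each chunk is its
length in hex, CRLF, the data, CRLF, and a zero-length chunk marks the
end of the body.]

data = 'hello, medusa'
chunk = '%x\r\n%s\r\n' % (len (data), data)
# -> 'd\r\nhello, medusa\r\n'
terminator = '0\r\n\r\n'                # final chunk, no footers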
+            
+# Unfortunately this isn't very useful right now (Aug 97), because
+# apparently the browsers don't do on-the-fly decompression.  Which
+# is sad, because this could _really_ speed things up, especially for
+# low-bandwidth clients (i.e., most everyone).
+            
 try:
-	import zlib
+    import zlib
 except ImportError:
-	zlib = None
-
+    zlib = None
+    
 class compressed_producer:
-	"""
-	Compress another producer on-the-fly, using ZLIB
-	[Unfortunately, none of the current browsers seem to support this]
-	"""
-
-	# Note: It's not very efficient to have the server repeatedly
-	# compressing your outgoing files: compress them ahead of time, or
-	# use a compress-once-and-store scheme.  However, if you have low
-	# bandwidth and low traffic, this may make more sense than
-	# maintaining your source files compressed.
-	#
-	# Can also be used for compressing dynamically-produced output.
-
-	def __init__ (self, producer, level=5):
-		self.producer = producer
-		self.compressor = zlib.compressobj (level)
-
-	def more (self):
-		if self.producer:
-			cdata = ''
-			# feed until we get some output
-			while not cdata:
-				data = self.producer.more()
-				if not data:
-					self.producer = None
-					return self.compressor.flush()
-				else:
-					cdata = self.compressor.compress (data)
-			return cdata
-		else:
-			return ''
-
+    """
+    Compress another producer on-the-fly, using ZLIB
+    [Unfortunately, none of the current browsers seem to support this]
+    """
+    
+    # Note: It's not very efficient to have the server repeatedly
+    # compressing your outgoing files: compress them ahead of time, or
+    # use a compress-once-and-store scheme.  However, if you have low
+    # bandwidth and low traffic, this may make more sense than
+    # maintaining your source files compressed.
+    #
+    # Can also be used for compressing dynamically-produced output.
+    
+    def __init__ (self, producer, level=5):
+        self.producer = producer
+        self.compressor = zlib.compressobj (level)
+        
+    def more (self):
+        if self.producer:
+            cdata = ''
+            # feed until we get some output
+            while not cdata:
+                data = self.producer.more()
+                if not data:
+                    self.producer = None
+                    return self.compressor.flush()
+                else:
+                    cdata = self.compressor.compress (data)
+            return cdata
+        else:
+            return ''
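
[The incremental pattern compressed_producer relies on: feed the source
through a zlib compressobj as it arrives, then flush() once the source
runs dry.  In miniature:]

import zlib

comp = zlib.compressobj (5)             # level=5, as in the default above
body = comp.compress (b'some response body') + comp.flush ()
assert zlib.decompress (body) == b'some response body'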
+            
 class escaping_producer:
-
-	"A producer that escapes a sequence of characters"
-	" Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."
-
-	def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
-		self.producer = producer
-		self.esc_from = esc_from
-		self.esc_to = esc_to
-		self.buffer = ''
-		from asynchat import find_prefix_at_end
-		self.find_prefix_at_end = find_prefix_at_end
-
-	def more (self):
-		esc_from = self.esc_from
-		esc_to   = self.esc_to
-
-		buffer = self.buffer + self.producer.more()
 
-		if buffer:
-			buffer = string.replace (buffer, esc_from, esc_to)
-			i = self.find_prefix_at_end (buffer, esc_from)
-			if i:
-				# we found a prefix
-				self.buffer = buffer[-i:]
-				return buffer[:-i]
-			else:
-				# no prefix, return it all
-				self.buffer = ''
-				return buffer
-		else:
-			return buffer
+    "A producer that escapes a sequence of characters"
+    " Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."
+    
+    def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
+        self.producer = producer
+        self.esc_from = esc_from
+        self.esc_to = esc_to
+        self.buffer = ''
+        from asynchat import find_prefix_at_end
+        self.find_prefix_at_end = find_prefix_at_end
+        
+    def more (self):
+        esc_from = self.esc_from
+        esc_to   = self.esc_to
+        
+        buffer = self.buffer + self.producer.more()
+        
+        if buffer:
+            buffer = string.replace (buffer, esc_from, esc_to)
+            i = self.find_prefix_at_end (buffer, esc_from)
+            if i:
+                # we found a prefix
+                self.buffer = buffer[-i:]
+                return buffer[:-i]
+            else:
+                # no prefix, return it all
+                self.buffer = ''
+                return buffer
+        else:
+            return buffer
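
[The escaping itself is plain dot-stuffing: any '\r\n.' in the payload
becomes '\r\n..' so a message body can never fake the end-of-message
marker.  The producer's extra work is holding back a possible partial
match at the end of each buffer; the substitution alone looks like:]

payload = 'line one\r\n.hidden terminator\r\nline two'
escaped = payload.replace ('\r\n.', '\r\n..')
# -> 'line one\r\n..hidden terminator\r\nline two'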

--- Updated File put_handler.py in package Zope2 --
--- put_handler.py	2001/04/25 19:07:33	1.2
+++ put_handler.py	2001/05/01 11:44:48	1.3
@@ -17,99 +17,99 @@
 last_request = None
 
 class put_handler:
-	def __init__ (self, filesystem, uri_regex):
-		self.filesystem = filesystem
-		if type (uri_regex) == type(''):
-			self.uri_regex = re.compile (uri_regex)
-		else:
-			self.uri_regex = uri_regex
-
-	def match (self, request):
-		uri = request.uri
-		if request.command == 'put':
-			m = self.uri_regex.match (uri)
-			if m and m.end() == len(uri):
-				return 1
-		return 0
-
-	def handle_request (self, request):
-
-		path, params, query, fragment = request.split_uri()
-
-		# strip off leading slashes
-		while path and path[0] == '/':
-			path = path[1:]
-
-		if '%' in path:
-			path = unquote (path)
-
-		# make sure there's a content-length header
-		cl = get_header (CONTENT_LENGTH, request.header)
-		if not cl:
-			request.error (411)
-			return
-		else:
-			cl = string.atoi (cl)
-
-		# don't let the try to overwrite a directory
-		if self.filesystem.isdir (path):
-			request.error (405)
-			return
-
-		is_update = self.filesystem.isfile (path)
-
-		try:
-			output_file = self.filesystem.open (path, 'wb')
-		except:
-			request.error (405)
-			return
-		
-		request.collector = put_collector (output_file, cl, request, is_update)
-
-		# no terminator while receiving PUT data
-		request.channel.set_terminator (None)
-
-		# don't respond yet, wait until we've received the data...
-		
+    def __init__ (self, filesystem, uri_regex):
+        self.filesystem = filesystem
+        if type (uri_regex) == type(''):
+            self.uri_regex = re.compile (uri_regex)
+        else:
+            self.uri_regex = uri_regex
+            
+    def match (self, request):
+        uri = request.uri
+        if request.command == 'put':
+            m = self.uri_regex.match (uri)
+            if m and m.end() == len(uri):
+                return 1
+        return 0
+        
+    def handle_request (self, request):
+    
+        path, params, query, fragment = request.split_uri()
+        
+        # strip off leading slashes
+        while path and path[0] == '/':
+            path = path[1:]
+            
+        if '%' in path:
+            path = unquote (path)
+            
+        # make sure there's a content-length header
+        cl = get_header (CONTENT_LENGTH, request.header)
+        if not cl:
+            request.error (411)
+            return
+        else:
+            cl = string.atoi (cl)
+            
+        # don't let them try to overwrite a directory
+        if self.filesystem.isdir (path):
+            request.error (405)
+            return
+            
+        is_update = self.filesystem.isfile (path)
+        
+        try:
+            output_file = self.filesystem.open (path, 'wb')
+        except:
+            request.error (405)
+            return
+            
+        request.collector = put_collector (output_file, cl, request, is_update)
+        
+        # no terminator while receiving PUT data
+        request.channel.set_terminator (None)
+        
+        # don't respond yet, wait until we've received the data...
+        
 class put_collector:
-	def __init__ (self, file, length, request, is_update):
-		self.file		= file
-		self.length		= length
-		self.request	= request
-		self.is_update	= is_update
-		self.bytes_in	= 0
-
-	def collect_incoming_data (self, data):
-		ld = len(data)
-		bi = self.bytes_in
-		if (bi + ld) >= self.length:
-			# last bit of data
-			chunk = self.length - bi
-			self.file.write (data[:chunk])
-			self.file.close()
-
-			if chunk != ld:
-				print 'orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:]))
-
-			# do some housekeeping
-			r = self.request
-			ch = r.channel
-			ch.current_request = None
-			# set the terminator back to the default
-			ch.set_terminator ('\r\n\r\n')
-			if self.is_update:
-				r.reply_code = 204 # No content
-				r.done()
-			else:
-				r.reply_now (201) # Created
-			# avoid circular reference
-			del self.request
-		else:
-			self.file.write (data)
-			self.bytes_in = self.bytes_in + ld
-
-	def found_terminator (self):
-		# shouldn't be called
-		pass
-
+    def __init__ (self, file, length, request, is_update):
+        self.file      = file
+        self.length    = length
+        self.request   = request
+        self.is_update = is_update
+        self.bytes_in  = 0
+        
+    def collect_incoming_data (self, data):
+        ld = len(data)
+        bi = self.bytes_in
+        if (bi + ld) >= self.length:
+            # last bit of data
+            chunk = self.length - bi
+            self.file.write (data[:chunk])
+            self.file.close()
+            
+            if chunk != ld:
+                print 'orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:]))
+                
+            # do some housekeeping
+            r = self.request
+            ch = r.channel
+            ch.current_request = None
+            # set the terminator back to the default
+            ch.set_terminator ('\r\n\r\n')
+            if self.is_update:
+                r.reply_code = 204 # No content
+                r.done()
+            else:
+                r.reply_now (201) # Created
+            # avoid circular reference
+            del self.request
+        else:
+            self.file.write (data)
+            self.bytes_in = self.bytes_in + ld
+            
+    def found_terminator (self):
+        # shouldn't be called
+        pass
+        
 CONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)
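
[How that CONTENT_LENGTH pattern gets used against a raw header block --
get_header is medusa's helper; this inlines the idea with search():]

import re

CONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)
header = 'Host: example.com\r\nContent-Length: 42\r\n'
m = CONTENT_LENGTH.search (header)
length = int (m.group (1)) if m else None    # -> 42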

--- Updated File redirecting_handler.py in package Zope2 --
--- redirecting_handler.py	2001/04/25 19:07:33	1.2
+++ redirecting_handler.py	2001/05/01 11:44:49	1.3
@@ -12,35 +12,35 @@
 
 class redirecting_handler:
 
-	def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
-		self.pattern = pattern
-		self.redirect = redirect
-		self.patreg = re.compile (pattern, regex_flag)
-		self.hits = counter.counter()
-
-	def match (self, request):
-		m = self.patref.match (request.uri)
-		return (m and (m.end() == len(request.uri)))
-			
-	def handle_request (self, request):
-		self.hits.increment()
-		m = self.patreg.match (request.uri)
-		part = m.group(1)
-
-		request['Location'] = self.redirect % part
-		request.error (302) # moved temporarily
-
-	def __repr__ (self):
-		return '<Redirecting Handler at %08x [%s => %s]>' % (
-			id(self),
-			repr(self.pattern),
-			repr(self.redirect)
-			)
-
-	def status (self):
-		import producers
-		return producers.simple_producer (
-			'<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
-				self.pattern, self.redirect, self.hits
-				)
-			)
+    def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
+        self.pattern = pattern
+        self.redirect = redirect
+        self.patreg = re.compile (pattern, regex_flag)
+        self.hits = counter.counter()
+        
+    def match (self, request):
+        m = self.patreg.match (request.uri)
+        return (m and (m.end() == len(request.uri)))
+        
+    def handle_request (self, request):
+        self.hits.increment()
+        m = self.patreg.match (request.uri)
+        part = m.group(1)
+        
+        request['Location'] = self.redirect % part
+        request.error (302) # moved temporarily
+        
+    def __repr__ (self):
+        return '<Redirecting Handler at %08x [%s => %s]>' % (
+                id(self),
+                repr(self.pattern),
+                repr(self.redirect)
+                )
+        
+    def status (self):
+        import producers
+        return producers.simple_producer (
+                '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
+                        self.pattern, self.redirect, self.hits
+                        )
+                )
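
For reference, match() requires the compiled pattern to cover the whole
request URI, and handle_request() substitutes the pattern's first group
into the redirect template.  A hedged usage sketch (hostname and paths
are placeholders):

    h = redirecting_handler ('/old/(.*)', 'http://www.example.com/new/%s')
    # a GET of /old/page.html now answers 302 with
    #   Location: http://www.example.com/new/page.html
    # installed the usual way, e.g. hs.install_handler (h)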

--- Updated File resolver.py in package Zope2 --
--- resolver.py	2001/04/25 19:07:34	1.8
+++ resolver.py	2001/05/01 11:44:49	1.9
@@ -54,389 +54,389 @@
 
 # build a DNS address request, _quickly_
 def fast_address_request (host, id=0):
-	return (
-		'%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
-		+ '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
-			string.join (
-				map (
-					lambda part: '%c%s' % (chr(len(part)),part),
-					string.split (host, '.')
-					), ''
-				)
-			)
-		)
-
+    return (
+            '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
+            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
+                    string.join (
+                            map (
+                                    lambda part: '%c%s' % (chr(len(part)),part),
+                                    string.split (host, '.')
+                                    ), ''
+                            )
+                    )
+            )
+    
 def fast_ptr_request (host, id=0):
-	return (
-		'%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
-		+ '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
-			string.join (
-				map (
-					lambda part: '%c%s' % (chr(len(part)),part),
-					string.split (host, '.')
-					), ''
-				)
-			)
-		)
-
+    return (
+            '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
+            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
+                    string.join (
+                            map (
+                                    lambda part: '%c%s' % (chr(len(part)),part),
+                                    string.split (host, '.')
+                                    ), ''
+                            )
+                    )
+            )
+    
 def unpack_name (r,pos):
-	n = []
-	while 1:
-		ll = ord(r[pos])
-		if (ll&0xc0):
-			# compression
-			pos = (ll&0x3f << 8) + (ord(r[pos+1]))
-		elif ll == 0:
-			break			
-		else:
-			pos = pos + 1
-			n.append (r[pos:pos+ll])
-			pos = pos + ll
-	return string.join (n,'.')
-
+    n = []
+    while 1:
+        ll = ord(r[pos])
+        if (ll&0xc0):
+            # compression
+            pos = (ll&0x3f << 8) + (ord(r[pos+1]))
+        elif ll == 0:
+            break
+        else:
+            pos = pos + 1
+            n.append (r[pos:pos+ll])
+            pos = pos + ll
+    return string.join (n,'.')
+    
 def skip_name (r,pos):
-	s = pos
-	while 1:
-		ll = ord(r[pos])
-		if (ll&0xc0):
-			# compression
-			return pos + 2
-		elif ll == 0:
-			pos = pos + 1
-			break
-		else:
-			pos = pos + ll + 1
-	return pos
-
+    s = pos
+    while 1:
+        ll = ord(r[pos])
+        if (ll&0xc0):
+            # compression
+            return pos + 2
+        elif ll == 0:
+            pos = pos + 1
+            break
+        else:
+            pos = pos + ll + 1
+    return pos
+    
 def unpack_ttl (r,pos):
-	return reduce (
-		lambda x,y: (x<<8)|y,
-		map (ord, r[pos:pos+4])
-		)
-
-# resource record
-#                                    1  1  1  1  1  1
-#      0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#    |                                               |
-#    /                                               /
-#    /                      NAME                     /
-#    |                                               |
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#    |                      TYPE                     |
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#    |                     CLASS                     |
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#    |                      TTL                      |
-#    |                                               |
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#    |                   RDLENGTH                    |
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
-#    /                     RDATA                     /
-#    /                                               /
-#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-
+    return reduce (
+            lambda x,y: (x<<8)|y,
+            map (ord, r[pos:pos+4])
+            )
+    
+# resource record
+#                                    1  1  1  1  1  1
+#      0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                                               |
+#    /                                               /
+#    /                      NAME                     /
+#    |                                               |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                      TYPE                     |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                     CLASS                     |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                      TTL                      |
+#    |                                               |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                   RDLENGTH                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+#    /                     RDATA                     /
+#    /                                               /
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
 def unpack_address_reply (r):
-	ancount = (ord(r[6])<<8) + (ord(r[7]))
-	# skip question, first name starts at 12,
-	# this is followed by QTYPE and QCLASS
-	pos = skip_name (r, 12) + 4
-	if ancount:
-		# we are looking very specifically for
-		# an answer with TYPE=A, CLASS=IN (\000\001\000\001)
-		for an in range(ancount):
-			pos = skip_name (r, pos)
-			if r[pos:pos+4] == '\000\001\000\001':
-				return (
-					unpack_ttl (r,pos+4),
-					'%d.%d.%d.%d' % tuple(map(ord,r[pos+10:pos+14]))
-					)
-			# skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
-			pos = pos + 8
-			rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
-			pos = pos + 2 + rdlength
-		return 0, None
-	else:
-		return 0, None
-
+    ancount = (ord(r[6])<<8) + (ord(r[7]))
+    # skip question, first name starts at 12,
+    # this is followed by QTYPE and QCLASS
+    pos = skip_name (r, 12) + 4
+    if ancount:
+        # we are looking very specifically for
+        # an answer with TYPE=A, CLASS=IN (\000\001\000\001)
+        for an in range(ancount):
+            pos = skip_name (r, pos)
+            if r[pos:pos+4] == '\000\001\000\001':
+                return (
+                        unpack_ttl (r,pos+4),
+                        '%d.%d.%d.%d' % tuple(map(ord,r[pos+10:pos+14]))
+                        )
+            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
+            pos = pos + 8
+            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
+            pos = pos + 2 + rdlength
+        return 0, None
+    else:
+        return 0, None
+        
 def unpack_ptr_reply (r):
-	ancount = (ord(r[6])<<8) + (ord(r[7]))
-	# skip question, first name starts at 12,
-	# this is followed by QTYPE and QCLASS
-	pos = skip_name (r, 12) + 4
-	if ancount:
-		# we are looking very specifically for
-		# an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
-		for an in range(ancount):
-			pos = skip_name (r, pos)
-			if r[pos:pos+4] == '\000\014\000\001':
-				return (
-					unpack_ttl (r,pos+4),
-					unpack_name (r, pos+10)
-					)
-			# skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
-			pos = pos + 8
-			rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
-			pos = pos + 2 + rdlength
-		return 0, None
-	else:
-		return 0, None
-
-
-# This is a UDP (datagram) resolver.
-
-#
-# It may be useful to implement a TCP resolver.  This would presumably
-# give us more reliable behavior when things get too busy.  A TCP
-# client would have to manage the connection carefully, since the
-# server is allowed to close it at will (the RFC recommends closing
-# after 2 minutes of idle time).
-#
-# Note also that the TCP client will have to prepend each request
-# with a 2-byte length indicator (see rfc1035).
-#
-
+    ancount = (ord(r[6])<<8) + (ord(r[7]))
+    # skip question, first name starts at 12,
+    # this is followed by QTYPE and QCLASS
+    pos = skip_name (r, 12) + 4
+    if ancount:
+        # we are looking very specifically for
+        # an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
+        for an in range(ancount):
+            pos = skip_name (r, pos)
+            if r[pos:pos+4] == '\000\014\000\001':
+                return (
+                        unpack_ttl (r,pos+4),
+                        unpack_name (r, pos+10)
+                        )
+            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
+            pos = pos + 8
+            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
+            pos = pos + 2 + rdlength
+        return 0, None
+    else:
+        return 0, None
+        
+        
+# This is a UDP (datagram) resolver.
+
+#
+# It may be useful to implement a TCP resolver.  This would presumably
+# give us more reliable behavior when things get too busy.  A TCP
+# client would have to manage the connection carefully, since the
+# server is allowed to close it at will (the RFC recommends closing
+# after 2 minutes of idle time).
+#
+# Note also that the TCP client will have to prepend each request
+# with a 2-byte length indicator (see rfc1035).
+#
+
 class resolver (asyncore.dispatcher):
-	id = counter()
-	def __init__ (self, server='127.0.0.1'):
-		asyncore.dispatcher.__init__ (self)
-		self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
-		self.server = server
-		self.request_map = {}
-		self.last_reap_time = int(time.time())      # reap every few minutes
-
-	def writable (self):
-		return 0
-
-	def log (self, *args):
-		pass
-
-	def handle_close (self):
-		self.log_info('closing!')
-		self.close()
-
-	def handle_error (self):      # don't close the connection on error
-		(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
-		self.log_info(
-				'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
-				'error')
-
-	def get_id (self):
-		return (self.id.as_long() % (1<<16))
-
-	def reap (self):          # find DNS requests that have timed out
-		now = int(time.time())
-		if now - self.last_reap_time > 180:        # reap every 3 minutes
-			self.last_reap_time = now              # update before we forget
-			for k,(host,unpack,callback,when) in self.request_map.items():
-				if now - when > 180:               # over 3 minutes old
-					del self.request_map[k]
-					try:                           # same code as in handle_read
-						callback (host, 0, None)   # timeout val is (0,None) 
-					except:
-						(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
-						self.log_info('%s %s %s' % (t,v,tbinfo), 'error')
-
-	def resolve (self, host, callback):
-		self.reap()                                # first, get rid of old guys
-		self.socket.sendto (
-			fast_address_request (host, self.get_id()),
-			(self.server, 53)
-			)
-		self.request_map [self.get_id()] = (
-			host, unpack_address_reply, callback, int(time.time()))
-		self.id.increment()
-
-	def resolve_ptr (self, host, callback):
-		self.reap()                                # first, get rid of old guys
-		ip = string.split (host, '.')
-		ip.reverse()
-		ip = string.join (ip, '.') + '.in-addr.arpa'
-		self.socket.sendto (
-			fast_ptr_request (ip, self.get_id()),
-			(self.server, 53)
-			)
-		self.request_map [self.get_id()] = (
-			host, unpack_ptr_reply, callback, int(time.time()))
-		self.id.increment()
-
-	def handle_read (self):
-		reply, whence = self.socket.recvfrom (512)
-		# for security reasons we may want to double-check
-		# that <whence> is the server we sent the request to.
-		id = (ord(reply[0])<<8) + ord(reply[1])
-		if self.request_map.has_key (id):
-			host, unpack, callback, when = self.request_map[id]
-			del self.request_map[id]
-			ttl, answer = unpack (reply)
-			try:
-				callback (host, ttl, answer)
-			except:
-				(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
-				self.log_info('%s %s %s' % ( t,v,tbinfo), 'error')
-
+    id = counter()
+    def __init__ (self, server='127.0.0.1'):
+        asyncore.dispatcher.__init__ (self)
+        self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
+        self.server = server
+        self.request_map = {}
+        self.last_reap_time = int(time.time())      # reap every few minutes
+        
+    def writable (self):
+        return 0
+        
+    def log (self, *args):
+        pass
+        
+    def handle_close (self):
+        self.log_info('closing!')
+        self.close()
+        
+    def handle_error (self):      # don't close the connection on error
+        (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+        self.log_info(
+                        'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
+                        'error')
+        
+    def get_id (self):
+        return (self.id.as_long() % (1<<16))
+        
+    def reap (self):          # find DNS requests that have timed out
+        now = int(time.time())
+        if now - self.last_reap_time > 180:        # reap every 3 minutes
+            self.last_reap_time = now              # update before we forget
+            for k,(host,unpack,callback,when) in self.request_map.items():
+                if now - when > 180:               # over 3 minutes old
+                    del self.request_map[k]
+                    try:                           # same code as in handle_read
+                        callback (host, 0, None)   # timeout val is (0,None) 
+                    except:
+                        (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+                        self.log_info('%s %s %s' % (t,v,tbinfo), 'error')
+                        
+    def resolve (self, host, callback):
+        self.reap()                                # first, get rid of old guys
+        self.socket.sendto (
+                fast_address_request (host, self.get_id()),
+                (self.server, 53)
+                )
+        self.request_map [self.get_id()] = (
+                host, unpack_address_reply, callback, int(time.time()))
+        self.id.increment()
+        
+    def resolve_ptr (self, host, callback):
+        self.reap()                                # first, get rid of old guys
+        ip = string.split (host, '.')
+        ip.reverse()
+        ip = string.join (ip, '.') + '.in-addr.arpa'
+        self.socket.sendto (
+                fast_ptr_request (ip, self.get_id()),
+                (self.server, 53)
+                )
+        self.request_map [self.get_id()] = (
+                host, unpack_ptr_reply, callback, int(time.time()))
+        self.id.increment()
+        
+    def handle_read (self):
+        reply, whence = self.socket.recvfrom (512)
+        # for security reasons we may want to double-check
+        # that <whence> is the server we sent the request to.
+        id = (ord(reply[0])<<8) + ord(reply[1])
+        if self.request_map.has_key (id):
+            host, unpack, callback, when = self.request_map[id]
+            del self.request_map[id]
+            ttl, answer = unpack (reply)
+            try:
+                callback (host, ttl, answer)
+            except:
+                (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+                self.log_info('%s %s %s' % ( t,v,tbinfo), 'error')
+                
 class rbl (resolver):
-
-	def resolve_maps (self, host, callback):
-		ip = string.split (host, '.')
-		ip.reverse()
-		ip = string.join (ip, '.') + '.rbl.maps.vix.com'
-		self.socket.sendto (
-			fast_ptr_request (ip, self.get_id()),
-			(self.server, 53)
-			)
-		self.request_map [self.get_id()] = host, self.check_reply, callback
-		self.id.increment()
-	
-	def check_reply (self, r):
-		# we only need to check RCODE.
-		rcode = (ord(r[3])&0xf)
-		self.log_info('MAPS RBL; RCODE =%02x\n %s' % (rcode, repr(r)))
-		return 0, rcode # (ttl, answer)
-
 
+    def resolve_maps (self, host, callback):
+        ip = string.split (host, '.')
+        ip.reverse()
+        ip = string.join (ip, '.') + '.rbl.maps.vix.com'
+        self.socket.sendto (
+                fast_ptr_request (ip, self.get_id()),
+                (self.server, 53)
+                )
+        self.request_map [self.get_id()] = host, self.check_reply, callback
+        self.id.increment()
+        
+    def check_reply (self, r):
+        # we only need to check RCODE.
+        rcode = (ord(r[3])&0xf)
+        self.log_info('MAPS RBL; RCODE =%02x\n %s' % (rcode, repr(r)))
+        return 0, rcode # (ttl, answer)
+        
+        
 class hooked_callback:
-	def __init__ (self, hook, callback):
-		self.hook, self.callback = hook, callback
-
-	def __call__ (self, *args):
-		apply (self.hook, args)
-		apply (self.callback, args)
-
+    def __init__ (self, hook, callback):
+        self.hook, self.callback = hook, callback
+        
+    def __call__ (self, *args):
+        apply (self.hook, args)
+        apply (self.callback, args)
+        
 class caching_resolver (resolver):
-	"Cache DNS queries.  Will need to honor the TTL value in the replies"
-
-	def __init__ (*args):
-		apply (resolver.__init__, args)
-		self = args[0]
-		self.cache = {}
-		self.forward_requests = counter()
-		self.reverse_requests = counter()
-		self.cache_hits = counter()
-
-	def resolve (self, host, callback):
-		self.forward_requests.increment()
-		if self.cache.has_key (host):
-			when, ttl, answer = self.cache[host]
-			# ignore TTL for now
-			callback (host, ttl, answer)
-			self.cache_hits.increment()
-		else:
-			resolver.resolve (
-				self,
-				host,
-				hooked_callback (
-					self.callback_hook,
-					callback
-					)
-				)
-			
-	def resolve_ptr (self, host, callback):
-		self.reverse_requests.increment()
-		if self.cache.has_key (host):
-			when, ttl, answer = self.cache[host]
-			# ignore TTL for now
-			callback (host, ttl, answer)
-			self.cache_hits.increment()
-		else:
-			resolver.resolve_ptr (
-				self,
-				host,
-				hooked_callback (
-					self.callback_hook,
-					callback
-					)
-				)
-
-	def callback_hook (self, host, ttl, answer):
-		self.cache[host] = time.time(), ttl, answer
-
-	SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION
-
-	def status (self):
-		import status_handler
-		import producers
-		return producers.simple_producer (
-			'<h2>%s</h2>'					% self.SERVER_IDENT
-			+ '<br>Server: %s'				% self.server
-			+ '<br>Cache Entries: %d'		% len(self.cache)
-			+ '<br>Outstanding Requests: %d' % len(self.request_map)
-			+ '<br>Forward Requests: %s'	% self.forward_requests
-			+ '<br>Reverse Requests: %s'	% self.reverse_requests
-			+ '<br>Cache Hits: %s'			% self.cache_hits
-			)
-
-#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""
-# def test_unpacker ():
-# 	print unpack_address_reply (test_reply)
-# 
-# import time
-# class timer:
-# 	def __init__ (self):
-# 		self.start = time.time()
-# 	def end (self):
-# 		return time.time() - self.start
-# 
-# # I get ~290 unpacks per second for the typical case, compared to ~48
-# # using dnslib directly.  also, that latter number does not include
-# # picking the actual data out.
-# 
-# def benchmark_unpacker():
-# 
-# 	r = range(1000)
-# 	t = timer()
-# 	for i in r:
-# 		unpack_address_reply (test_reply)
-# 	print '%.2f unpacks per second' % (1000.0 / t.end())
-
+    "Cache DNS queries.  Will need to honor the TTL value in the replies"
+    
+    def __init__ (*args):
+        apply (resolver.__init__, args)
+        self = args[0]
+        self.cache = {}
+        self.forward_requests = counter()
+        self.reverse_requests = counter()
+        self.cache_hits = counter()
+        
+    def resolve (self, host, callback):
+        self.forward_requests.increment()
+        if self.cache.has_key (host):
+            when, ttl, answer = self.cache[host]
+            # ignore TTL for now
+            callback (host, ttl, answer)
+            self.cache_hits.increment()
+        else:
+            resolver.resolve (
+                    self,
+                    host,
+                    hooked_callback (
+                            self.callback_hook,
+                            callback
+                            )
+                    )
+            
+    def resolve_ptr (self, host, callback):
+        self.reverse_requests.increment()
+        if self.cache.has_key (host):
+            when, ttl, answer = self.cache[host]
+            # ignore TTL for now
+            callback (host, ttl, answer)
+            self.cache_hits.increment()
+        else:
+            resolver.resolve_ptr (
+                    self,
+                    host,
+                    hooked_callback (
+                            self.callback_hook,
+                            callback
+                            )
+                    )
+            
+    def callback_hook (self, host, ttl, answer):
+        self.cache[host] = time.time(), ttl, answer
+        
+    SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION
+    
+    def status (self):
+        import status_handler
+        import producers
+        return producers.simple_producer (
+                '<h2>%s</h2>'                    % self.SERVER_IDENT
+                + '<br>Server: %s'               % self.server
+                + '<br>Cache Entries: %d'        % len(self.cache)
+                + '<br>Outstanding Requests: %d' % len(self.request_map)
+                + '<br>Forward Requests: %s'     % self.forward_requests
+                + '<br>Reverse Requests: %s'     % self.reverse_requests
+                + '<br>Cache Hits: %s'           % self.cache_hits
+                )
+        
+#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""
+# def test_unpacker ():
+# 	print unpack_address_reply (test_reply)
+# 
+# import time
+# class timer:
+# 	def __init__ (self):
+# 		self.start = time.time()
+# 	def end (self):
+# 		return time.time() - self.start
+# 
+# # I get ~290 unpacks per second for the typical case, compared to ~48
+# # using dnslib directly.  also, that latter number does not include
+# # picking the actual data out.
+# 
+# def benchmark_unpacker():
+# 
+# 	r = range(1000)
+# 	t = timer()
+# 	for i in r:
+# 		unpack_address_reply (test_reply)
+# 	print '%.2f unpacks per second' % (1000.0 / t.end())
+
 if __name__ == '__main__':
-	import sys
-	if len(sys.argv) == 1:
-		print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
-		sys.exit(0)
-	elif ('-s' in sys.argv):
-		i = sys.argv.index('-s')
-		server = sys.argv[i+1]
-		del sys.argv[i:i+2]
-	else:
-		server = '127.0.0.1'
-
-	if ('-r' in sys.argv):
-		reverse = 1
-		i = sys.argv.index('-r')
-		del sys.argv[i]
-	else:
-		reverse = 0
-
-	if ('-m' in sys.argv):
-		maps = 1
-		sys.argv.remove ('-m')
-	else:
-		maps = 0
-
-	if maps:
-		r = rbl (server)
-	else:
-		r = caching_resolver(server)
-
-	count = len(sys.argv) - 1
-
-	def print_it (host, ttl, answer):
-		global count
-		print '%s: %s' % (host, answer)
-		count = count - 1
-		if not count:
-			r.close()
-
-	for host in sys.argv[1:]:
-		if reverse:
-			r.resolve_ptr (host, print_it)
-		elif maps:
-			r.resolve_maps (host, print_it)
-		else:
-			r.resolve (host, print_it)
-
-	# hooked asyncore.loop()
-	while asyncore.socket_map:
-		asyncore.poll (30.0)
-		print 'requests outstanding: %d' % len(r.request_map)
+    import sys
+    if len(sys.argv) == 1:
+        print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
+        sys.exit(0)
+    elif ('-s' in sys.argv):
+        i = sys.argv.index('-s')
+        server = sys.argv[i+1]
+        del sys.argv[i:i+2]
+    else:
+        server = '127.0.0.1'
+        
+    if ('-r' in sys.argv):
+        reverse = 1
+        i = sys.argv.index('-r')
+        del sys.argv[i]
+    else:
+        reverse = 0
+        
+    if ('-m' in sys.argv):
+        maps = 1
+        sys.argv.remove ('-m')
+    else:
+        maps = 0
+        
+    if maps:
+        r = rbl (server)
+    else:
+        r = caching_resolver(server)
+        
+    count = len(sys.argv) - 1
+    
+    def print_it (host, ttl, answer):
+        global count
+        print '%s: %s' % (host, answer)
+        count = count - 1
+        if not count:
+            r.close()
+            
+    for host in sys.argv[1:]:
+        if reverse:
+            r.resolve_ptr (host, print_it)
+        elif maps:
+            r.resolve_maps (host, print_it)
+        else:
+            r.resolve (host, print_it)
+            
+    # hooked asyncore.loop()
+    while asyncore.socket_map:
+        asyncore.poll (30.0)
+        print 'requests outstanding: %d' % len(r.request_map)
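
For reference, the fast_address_request/fast_ptr_request helpers above
hand-pack the DNS question section: each label of the name is preceded by
its length byte, and the name is closed by the \000 in the surrounding
format string.  Just that encoding step, as a standalone sketch:

    import string

    def pack_dns_name (host):
        "Sketch: 'www.example.com' -> '\\003www\\007example\\003com'"
        return string.join (
            map (lambda part: chr(len(part)) + part,
                 string.split (host, '.')),
            '')

    # fast_address_request() wraps this in a fixed 12-byte header, adds the
    # terminating \000, then QTYPE=A (\000\001) and QCLASS=IN (\000\001).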

--- Updated File rpc_client.py in package Zope2 --
--- rpc_client.py	2001/04/25 19:07:34	1.2
+++ rpc_client.py	2001/05/01 11:44:49	1.3
@@ -45,274 +45,274 @@
 #
 
 class RPC_Error (exceptions.StandardError):
-	pass
-
-# ===========================================================================
-#							  RPC Client
-# ===========================================================================
-
-# request types:
-# 0 call
-# 1 getattr
-# 2 setattr
-# 3 repr
-# 4 del
-
-
+    pass
+    
+# ===========================================================================
+#                              RPC Client
+# ===========================================================================
+
+# request types:
+# 0 call
+# 1 getattr
+# 2 setattr
+# 3 repr
+# 4 del
+    
+    
 class rpc_proxy:
-
-	DEBUG = 0
-
-	def __init__ (self, conn, oid):
-		# route around __setattr__
-		self.__dict__['conn'] = conn
-		self.__dict__['oid'] = oid
-
-	# Warning: be VERY CAREFUL with attribute references, keep
-	#             this __getattr__ in mind!
-
-	def __getattr__ (self, attr):
-		# __getattr__ and __call__
-		if attr == '__call__':
-			# 0 == __call__
-			return self.__remote_call__
-		elif attr == '__repr__':
-			# 3 == __repr__
-			return self.__remote_repr__
-		elif attr == '__getitem__':
-			return self.__remote_getitem__
-		elif attr == '__setitem__':
-			return self.__remote_setitem__
-		elif attr == '__len__':
-			return self.__remote_len__
-		else:
-			# 1 == __getattr__
-			return self.__send_request__ (1, attr)
-		
-	def __setattr__ (self, attr, value):
-		return self.__send_request__ (2, (attr, value))
-
-	def __del__ (self):
-		try:
-			self.__send_request__ (4, None)
-		except:
-			import who_calls
-			info = who_calls.compact_traceback()
-			print info
-
-	def __remote_repr__ (self):
-		r = self.__send_request__ (3, None)
-		return '<remote object [%s]>' % r[1:-1]
-
-	def __remote_call__ (self, *args):
-		return self.__send_request__ (0, args)
 
-	def __remote_getitem__ (self, key):
-		return self.__send_request__ (5, key)
-
-	def __remote_setitem__ (self, key, value):
-		return self.__send_request__ (6, (key, value))
-
-	def __remote_len__ (self):
-		return self.__send_request__ (7, None)
-
-	_request_types_ = ['call', 'getattr', 'setattr', 'repr', 'del', 'getitem', 'setitem', 'len']
-
-	def __send_request__ (self, *args):
-		if self.DEBUG:
-			kind = args[0]
-			print (
-				'RPC: ==> %s:%08x:%s:%s' % (
-					self.conn.address,
-					self.oid,
-					self._request_types_[kind],
-					repr(args[1:])
-					)
-				)
-		packet = marshal.dumps ((self.oid,)+args)
-		# send request
-		self.conn.send_packet (packet)
-		# get response
-		data = self.conn.receive_packet()
-		# types of response:
-		# 0: proxy
-		# 1: error
-		# 2: marshal'd data
-		
-		kind, value = marshal.loads (data)
-
-		if kind == 0:
-			# proxy (value == oid)
-			if self.DEBUG:
-				print 'RPC: <== proxy(%08x)' % (value)
-			return rpc_proxy (self.conn, value)
-		elif kind == 1:
-			raise RPC_Error, value
-		else:
-			if self.DEBUG:
-				print 'RPC: <== %s' % (repr(value))
-			return value
-
+    DEBUG = 0
+    
+    def __init__ (self, conn, oid):
+            # route around __setattr__
+        self.__dict__['conn'] = conn
+        self.__dict__['oid'] = oid
+        
+    # Warning: be VERY CAREFUL with attribute references, keep
+    #             this __getattr__ in mind!
+        
+    def __getattr__ (self, attr):
+        # __getattr__ and __call__
+        if attr == '__call__':
+            # 0 == __call__
+            return self.__remote_call__
+        elif attr == '__repr__':
+            # 3 == __repr__
+            return self.__remote_repr__
+        elif attr == '__getitem__':
+            return self.__remote_getitem__
+        elif attr == '__setitem__':
+            return self.__remote_setitem__
+        elif attr == '__len__':
+            return self.__remote_len__
+        else:
+            # 1 == __getattr__
+            return self.__send_request__ (1, attr)
+            
+    def __setattr__ (self, attr, value):
+        return self.__send_request__ (2, (attr, value))
+        
+    def __del__ (self):
+        try:
+            self.__send_request__ (4, None)
+        except:
+            import who_calls
+            info = who_calls.compact_traceback()
+            print info
+            
+    def __remote_repr__ (self):
+        r = self.__send_request__ (3, None)
+        return '<remote object [%s]>' % r[1:-1]
+        
+    def __remote_call__ (self, *args):
+        return self.__send_request__ (0, args)
+        
+    def __remote_getitem__ (self, key):
+        return self.__send_request__ (5, key)
+        
+    def __remote_setitem__ (self, key, value):
+        return self.__send_request__ (6, (key, value))
+        
+    def __remote_len__ (self):
+        return self.__send_request__ (7, None)
+        
+    _request_types_ = ['call', 'getattr', 'setattr', 'repr', 'del', 'getitem', 'setitem', 'len']
+    
+    def __send_request__ (self, *args):
+        if self.DEBUG:
+            kind = args[0]
+            print (
+                    'RPC: ==> %s:%08x:%s:%s' % (
+                            self.conn.address,
+                            self.oid,
+                            self._request_types_[kind],
+                            repr(args[1:])
+                            )
+                    )
+        packet = marshal.dumps ((self.oid,)+args)
+        # send request
+        self.conn.send_packet (packet)
+        # get response
+        data = self.conn.receive_packet()
+        # types of response:
+        # 0: proxy
+        # 1: error
+        # 2: marshal'd data
+        
+        kind, value = marshal.loads (data)
+        
+        if kind == 0:
+            # proxy (value == oid)
+            if self.DEBUG:
+                print 'RPC: <== proxy(%08x)' % (value)
+            return rpc_proxy (self.conn, value)
+        elif kind == 1:
+            raise RPC_Error, value
+        else:
+            if self.DEBUG:
+                print 'RPC: <== %s' % (repr(value))
+            return value
+            
 class rpc_connection:
-
-	cache = {}
-
-	def __init__ (self, address):
-		self.address = address
-		self.connect ()
-
-	def connect (self):
-		s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
-		s.connect (self.address)
-		self.socket = s
-
-	def receive_packet (self):
-		packet_len = string.atoi (self.socket.recv (8), 16)
-		packet = []
-		while packet_len:
-			data = self.socket.recv (8192)
-			packet.append (data)
-			packet_len = packet_len - len(data)
-		return string.join (packet, '')
-
-	def send_packet (self, packet):
-		self.socket.send ('%08x%s' % (len(packet), packet))
 
+    cache = {}
+    
+    def __init__ (self, address):
+        self.address = address
+        self.connect ()
+        
+    def connect (self):
+        s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
+        s.connect (self.address)
+        self.socket = s
+        
+    def receive_packet (self):
+        packet_len = string.atoi (self.socket.recv (8), 16)
+        packet = []
+        while packet_len:
+            data = self.socket.recv (8192)
+            packet.append (data)
+            packet_len = packet_len - len(data)
+        return string.join (packet, '')
+        
+    def send_packet (self, packet):
+        self.socket.send ('%08x%s' % (len(packet), packet))
+        
 def rpc_connect (address = ('localhost', 8746)):
-	if not rpc_connection.cache.has_key (address):
-		conn = rpc_connection (address)
-		# get oid of remote object
-		data = conn.receive_packet()
-		(oid,) = marshal.loads (data)
-		rpc_connection.cache[address] = rpc_proxy (conn, oid)
-	return rpc_connection.cache[address]
-
-# ===========================================================================
-#			fastrpc client
-# ===========================================================================
-
+    if not rpc_connection.cache.has_key (address):
+        conn = rpc_connection (address)
+        # get oid of remote object
+        data = conn.receive_packet()
+        (oid,) = marshal.loads (data)
+        rpc_connection.cache[address] = rpc_proxy (conn, oid)
+    return rpc_connection.cache[address]
+    
+# ===========================================================================
+#                       fastrpc client
+# ===========================================================================
+    
 class fastrpc_proxy:
-
-	def __init__ (self, conn, path=()):
-		self.conn = conn
-		self.path = path
-
-	def __getattr__ (self, attr):
-		if attr == '__call__':
-			return self.__method_caller__
-		else:
-			return fastrpc_proxy (self.conn, self.path + (attr,))
-
-	def __method_caller__ (self, *args):
-		# send request
-		packet = marshal.dumps ((self.path, args))
-		self.conn.send_packet (packet)
-		# get response
-		data = self.conn.receive_packet()
-		error, result = marshal.loads (data)
-		if error is None:
-			return result
-		else:
-			raise RPC_Error, error
-
-	def __repr__ (self):
-		return '<remote-method-%s at %x>' % (string.join (self.path, '.'), id (self))
 
+    def __init__ (self, conn, path=()):
+        self.conn = conn
+        self.path = path
+        
+    def __getattr__ (self, attr):
+        if attr == '__call__':
+            return self.__method_caller__
+        else:
+            return fastrpc_proxy (self.conn, self.path + (attr,))
+            
+    def __method_caller__ (self, *args):
+        # send request
+        packet = marshal.dumps ((self.path, args))
+        self.conn.send_packet (packet)
+        # get response
+        data = self.conn.receive_packet()
+        error, result = marshal.loads (data)
+        if error is None:
+            return result
+        else:
+            raise RPC_Error, error
+            
+    def __repr__ (self):
+        return '<remote-method-%s at %x>' % (string.join (self.path, '.'), id (self))
+        
 def fastrpc_connect (address = ('localhost', 8748)):
-	if not rpc_connection.cache.has_key (address):
-		conn = rpc_connection (address)
-		rpc_connection.cache[address] = fastrpc_proxy (conn)
-	return rpc_connection.cache[address]
-
-# ===========================================================================
-#						 async fastrpc client
-# ===========================================================================
-
+    if not rpc_connection.cache.has_key (address):
+        conn = rpc_connection (address)
+        rpc_connection.cache[address] = fastrpc_proxy (conn)
+    return rpc_connection.cache[address]
+    
+# ===========================================================================
+#                                 async fastrpc client
+# ===========================================================================
+    
 import asynchat
 import fifo
 
 class async_fastrpc_client (asynchat.async_chat):
-
-	STATE_LENGTH = 'length state'
-	STATE_PACKET = 'packet state'
-
-	def __init__ (self, address=('idb', 3001)):
-
-		asynchat.async_chat.__init__ (self)
 
-		if type(address) is type(''):
-			family = socket.AF_UNIX
-		else:
-			family = socket.AF_INET
-
-		self.create_socket (family, socket.SOCK_STREAM)
-		self.address = address
-		self.request_fifo = fifo.fifo()
-		self.buffer = []
-		self.pstate = self.STATE_LENGTH
-		self.set_terminator (8)
-		self._connected = 0
-		self.connect (self.address)
-
-	def log (self, *args):
-		pass
-
-	def handle_connect (self):
-		self._connected = 1
-
-	def close (self):
-		self._connected = 0
-		self.flush_pending_requests ('lost connection to rpc server')
-		asynchat.async_chat.close(self)
-
-	def flush_pending_requests (self, why):
-		f = self.request_fifo
-		while len(f):
-			callback = f.pop()
-			callback (why, None)
-
-	def collect_incoming_data (self, data):
-		self.buffer.append (data)
-
-	def found_terminator (self):
-		self.buffer, data = [], string.join (self.buffer, '')
-
-		if self.pstate is self.STATE_LENGTH:
-			packet_length = string.atoi (data, 16)
-			self.set_terminator (packet_length)
-			self.pstate = self.STATE_PACKET
-		else:
-			# modified to fix socket leak in chat server, 2000-01-27, schiller@eGroups.net
-			#self.set_terminator (8)
-			#self.pstate = self.STATE_LENGTH
-			error, result = marshal.loads (data)
-			callback = self.request_fifo.pop()
-			callback (error, result)
-			self.close()	# for chat server
-			
-	def call_method (self, method, args, callback):
-		if not self._connected:
-			# might be a unix socket...
-			family, type = self.family_and_type
-			self.create_socket (family, type)
-			self.connect (self.address)
-		# push the request out the socket
-		path = string.split (method, '.')
-		packet = marshal.dumps ((path, args))
-		self.push ('%08x%s' % (len(packet), packet))
-		self.request_fifo.push (callback)
-
-
+    STATE_LENGTH = 'length state'
+    STATE_PACKET = 'packet state'
+    
+    def __init__ (self, address=('idb', 3001)):
+    
+        asynchat.async_chat.__init__ (self)
+        
+        if type(address) is type(''):
+            family = socket.AF_UNIX
+        else:
+            family = socket.AF_INET
+            
+        self.create_socket (family, socket.SOCK_STREAM)
+        self.address = address
+        self.request_fifo = fifo.fifo()
+        self.buffer = []
+        self.pstate = self.STATE_LENGTH
+        self.set_terminator (8)
+        self._connected = 0
+        self.connect (self.address)
+        
+    def log (self, *args):
+        pass
+        
+    def handle_connect (self):
+        self._connected = 1
+        
+    def close (self):
+        self._connected = 0
+        self.flush_pending_requests ('lost connection to rpc server')
+        asynchat.async_chat.close(self)
+        
+    def flush_pending_requests (self, why):
+        f = self.request_fifo
+        while len(f):
+            callback = f.pop()
+            callback (why, None)
+            
+    def collect_incoming_data (self, data):
+        self.buffer.append (data)
+        
+    def found_terminator (self):
+        self.buffer, data = [], string.join (self.buffer, '')
+        
+        if self.pstate is self.STATE_LENGTH:
+            packet_length = string.atoi (data, 16)
+            self.set_terminator (packet_length)
+            self.pstate = self.STATE_PACKET
+        else:
+            # modified to fix socket leak in chat server, 2000-01-27, schiller@eGroups.net
+            #self.set_terminator (8)
+            #self.pstate = self.STATE_LENGTH
+            error, result = marshal.loads (data)
+            callback = self.request_fifo.pop()
+            callback (error, result)
+            self.close()	# for chat server
+            
+    def call_method (self, method, args, callback):
+        if not self._connected:
+            # might be a unix socket...
+            family, type = self.family_and_type
+            self.create_socket (family, type)
+            self.connect (self.address)
+        # push the request out the socket
+        path = string.split (method, '.')
+        packet = marshal.dumps ((path, args))
+        self.push ('%08x%s' % (len(packet), packet))
+        self.request_fifo.push (callback)
+        
+        
 if __name__ == '__main__':
-	import sys
-	if '-f' in sys.argv:
-		connect = fastrpc_connect
-	else:
-		connect = rpc_connect
-
-	print 'connecting...'
-	c = connect()
-	print 'calling <remote>.calc.sum (1,2,3)'
-	print c.calc.sum (1,2,3)
-	print 'calling <remote>.calc.nonexistent(), expect an exception!'
-	print c.calc.nonexistent()
+    import sys
+    if '-f' in sys.argv:
+        connect = fastrpc_connect
+    else:
+        connect = rpc_connect
+        
+    print 'connecting...'
+    c = connect()
+    print 'calling <remote>.calc.sum (1,2,3)'
+    print c.calc.sum (1,2,3)
+    print 'calling <remote>.calc.nonexistent(), expect an exception!'
+    print c.calc.nonexistent()
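
Both directions of the RPC protocol use the same framing: an 8-digit hex
length followed by the marshal'd payload, as implemented by the blocking
helpers in rpc_connection.  An isolated sketch of the frame/unframe pair
(hypothetical names, short reads ignored for brevity):

    import marshal, string

    def frame (obj):
        "marshal an object and length-prefix it: NNNNNNNN + payload"
        packet = marshal.dumps (obj)
        return '%08x%s' % (len(packet), packet)

    def unframe (read):
        "<read> is any callable returning exactly n bytes, e.g. a file's read"
        n = string.atoi (read (8), 16)
        return marshal.loads (read (n))

The proxy client frames (oid, kind, arg) requests and unframes
(kind, value) replies; the fastrpc variant frames (path, args) and
unframes (error, result).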

--- Updated File rpc_server.py in package Zope2 --
--- rpc_server.py	2001/04/25 19:07:34	1.2
+++ rpc_server.py	2001/05/01 11:44:49	1.3
@@ -56,268 +56,268 @@
 
 class rpc_channel (asynchat.async_chat):
 
-	'Simple RPC server.'
-
-	# a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm
-	# (hex length in 8 bytes, followed by marshal'd packet data)
-	# same protocol used in both directions.
-
-	STATE_LENGTH = 'length state'
-	STATE_PACKET = 'packet state'
-
-	ac_out_buffer_size = 65536
-
-	request_counter = counter()
-	exception_counter = counter()
-	client_counter = counter()
-
-	def __init__ (self, root, conn, addr):
-		self.root = root
-		self.addr = addr
-		asynchat.async_chat.__init__ (self, conn)
-		self.pstate = self.STATE_LENGTH
-		self.set_terminator (8)
-		self.buffer = []
-		self.proxies = {}
-		rid = id(root)
-		self.new_reference (root)
-		p = marshal.dumps ((rid,))
-		# send root oid to the other side
-		self.push ('%08x%s' % (len(p), p))
-		self.client_counter.increment()
-		
-	def new_reference (self, object):
-		oid = id(object)
-		ignore, refcnt = self.proxies.get (oid, (None, 0))
-		self.proxies[oid] = (object, refcnt + 1)
-
-	def forget_reference (self, oid):
-		object, refcnt = self.proxies.get (oid, (None, 0))
-		if refcnt > 1:
-			self.proxies[oid] = (object, refcnt - 1)
-		else:
-			del self.proxies[oid]
-
-	def log (self, *ignore):
-		pass
-
-	def collect_incoming_data (self, data):
-		self.buffer.append (data)
-		
-	def found_terminator (self):
-		self.buffer, data = [], string.join (self.buffer, '')
-
-		if self.pstate is self.STATE_LENGTH:
-			packet_length = string.atoi (data, 16)
-			self.set_terminator (packet_length)
-			self.pstate = self.STATE_PACKET
-		else:
-
-			self.set_terminator (8)
-			self.pstate = self.STATE_LENGTH
-
-			oid, kind, arg = marshal.loads (data)
-
-			obj, refcnt = self.proxies[oid]
-			e = None
-			reply_kind = 2
-
-			try:
-				if kind == 0:
-					# __call__
-					result = apply (obj, arg)
-				elif kind == 1:
-					# __getattr__
-					result = getattr (obj, arg)
-				elif kind == 2:
-					# __setattr__
-					key, value = arg
-					result = setattr (obj, key, value)
-				elif kind == 3:
-					# __repr__
-					result = repr(obj)
-				elif kind == 4:
-					# __del__
-					self.forget_reference (oid)
-					result = None
-				elif kind == 5:
-					# __getitem__
-					result = obj[arg]
-				elif kind == 6:
-					# __setitem__
-					(key, value) = arg
-					obj[key] = value
-					result = None
-				elif kind == 7:
-					# __len__
-					result = len(obj)
-
-			except:
-				reply_kind = 1
-				(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
-				result = '%s:%s:%s:%s (%s:%s)' % (MY_NAME, file, fun, line, t, str(v))
-				self.log_info (result, 'error')
-				self.exception_counter.increment()
-
-			self.request_counter.increment()
-
-			# optimize a common case
-			if type(result) is types.InstanceType:
-				can_marshal = 0
-			else:
-				can_marshal = 1
-
-			try:
-				rb = marshal.dumps ((reply_kind, result))
-			except ValueError:
-				can_marshal = 0
-
-			if not can_marshal:
-				# unmarshallable object, return a reference
-				rid = id(result)
-				self.new_reference (result)
-				rb = marshal.dumps ((0, rid))
-				
-			self.push_with_producer (
-				scanning_producer (
-					('%08x' % len(rb)) + rb,
-					buffer_size = 65536
-					)
-				)
-
+    'Simple RPC server.'
+    
+    # a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm
+    # (hex length in 8 bytes, followed by marshal'd packet data)
+    # same protocol used in both directions.
+    
+    STATE_LENGTH = 'length state'
+    STATE_PACKET = 'packet state'
+    
+    ac_out_buffer_size = 65536
+    
+    request_counter = counter()
+    exception_counter = counter()
+    client_counter = counter()
+    
+    def __init__ (self, root, conn, addr):
+        self.root = root
+        self.addr = addr
+        asynchat.async_chat.__init__ (self, conn)
+        self.pstate = self.STATE_LENGTH
+        self.set_terminator (8)
+        self.buffer = []
+        self.proxies = {}
+        rid = id(root)
+        self.new_reference (root)
+        p = marshal.dumps ((rid,))
+        # send root oid to the other side
+        self.push ('%08x%s' % (len(p), p))
+        self.client_counter.increment()
+        
+    def new_reference (self, object):
+        oid = id(object)
+        ignore, refcnt = self.proxies.get (oid, (None, 0))
+        self.proxies[oid] = (object, refcnt + 1)
+        
+    def forget_reference (self, oid):
+        object, refcnt = self.proxies.get (oid, (None, 0))
+        if refcnt > 1:
+            self.proxies[oid] = (object, refcnt - 1)
+        else:
+            del self.proxies[oid]
+            
+    def log (self, *ignore):
+        pass
+        
+    def collect_incoming_data (self, data):
+        self.buffer.append (data)
+        
+    def found_terminator (self):
+        self.buffer, data = [], string.join (self.buffer, '')
+        
+        if self.pstate is self.STATE_LENGTH:
+            packet_length = string.atoi (data, 16)
+            self.set_terminator (packet_length)
+            self.pstate = self.STATE_PACKET
+        else:
+        
+            self.set_terminator (8)
+            self.pstate = self.STATE_LENGTH
+            
+            oid, kind, arg = marshal.loads (data)
+            
+            obj, refcnt = self.proxies[oid]
+            e = None
+            reply_kind = 2
+            
+            try:
+                if kind == 0:
+                    # __call__
+                    result = apply (obj, arg)
+                elif kind == 1:
+                    # __getattr__
+                    result = getattr (obj, arg)
+                elif kind == 2:
+                    # __setattr__
+                    key, value = arg
+                    result = setattr (obj, key, value)
+                elif kind == 3:
+                    # __repr__
+                    result = repr(obj)
+                elif kind == 4:
+                    # __del__
+                    self.forget_reference (oid)
+                    result = None
+                elif kind == 5:
+                    # __getitem__
+                    result = obj[arg]
+                elif kind == 6:
+                    # __setitem__
+                    (key, value) = arg
+                    obj[key] = value
+                    result = None
+                elif kind == 7:
+                    # __len__
+                    result = len(obj)
+                    
+            except:
+                reply_kind = 1
+                (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+                result = '%s:%s:%s:%s (%s:%s)' % (MY_NAME, file, fun, line, t, str(v))
+                self.log_info (result, 'error')
+                self.exception_counter.increment()
+                
+            self.request_counter.increment()
+            
+            # optimize a common case
+            if type(result) is types.InstanceType:
+                can_marshal = 0
+            else:
+                can_marshal = 1
+                
+            try:
+                rb = marshal.dumps ((reply_kind, result))
+            except ValueError:
+                can_marshal = 0
+                
+            if not can_marshal:
+                # unmarshallable object, return a reference
+                rid = id(result)
+                self.new_reference (result)
+                rb = marshal.dumps ((0, rid))
+                
+            self.push_with_producer (
+                    scanning_producer (
+                            ('%08x' % len(rb)) + rb,
+                            buffer_size = 65536
+                            )
+                    )
+            
 class rpc_server_root:
-	pass
-
+    pass
+    
 class rpc_server (asyncore.dispatcher):
-
-	def __init__ (self, root, address = ('', 8746)):
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-		self.set_reuse_addr()
-		self.bind (address)
-		self.listen (128)
-		self.root = root
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		rpc_channel (self.root, conn, addr)
-		
-
-# ===========================================================================
-#						   Fast RPC server
-# ===========================================================================
 
-# no proxies, request consists
-# of a 'chain' of getattrs terminated by a __call__.
-
-# Protocol:
-# <path>.<to>.<object> ( <param1>, <param2>, ... )
-# => ( <value1>, <value2>, ... )
-#
-#
-# (<path>, <params>)
-# path: tuple of strings
-# params: tuple of objects
-
+    def __init__ (self, root, address = ('', 8746)):
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind (address)
+        self.listen (128)
+        self.root = root
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        rpc_channel (self.root, conn, addr)
+        
+        
+# ===========================================================================
+#                                  Fast RPC server
+# ===========================================================================
+
+# no proxies, request consists
+# of a 'chain' of getattrs terminated by a __call__.
+
+# Protocol:
+# <path>.<to>.<object> ( <param1>, <param2>, ... )
+# => ( <value1>, <value2>, ... )
+#
+#
+# (<path>, <params>)
+# path: tuple of strings
+# params: tuple of objects
+        
 class fastrpc_channel (asynchat.async_chat):
-
-	'Simple RPC server'
-
-	# a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm
-	# (hex length in 8 bytes, followed by marshal'd packet data)
-	# same protocol used in both directions.
-
-	# A request consists of (<path-tuple>, <args-tuple>)
-	# where <path-tuple> is a list of strings (eqv to string.split ('a.b.c', '.'))
-
-	STATE_LENGTH = 'length state'
-	STATE_PACKET = 'packet state'
-
-	def __init__ (self, root, conn, addr):
-		self.root = root
-		self.addr = addr
-		asynchat.async_chat.__init__ (self, conn)
-		self.pstate = self.STATE_LENGTH
-		self.set_terminator (8)
-		self.buffer = []
-		
-	def log (*ignore):
-		pass
-
-	def collect_incoming_data (self, data):
-		self.buffer.append (data)
-		
-	def found_terminator (self):
-		self.buffer, data = [], string.join (self.buffer, '')
-
-		if self.pstate is self.STATE_LENGTH:
-			packet_length = string.atoi (data, 16)
-			self.set_terminator (packet_length)
-			self.pstate = self.STATE_PACKET
-		else:
-			self.set_terminator (8)
-			self.pstate = self.STATE_LENGTH
-			(path, params) = marshal.loads (data)
-			o = self.root
-
-			e = None
-
-			try:
-				for p in path:
-					o = getattr (o, p)
-				result = apply (o, params)
-			except:
-				e = repr (asyncore.compact_traceback())
-				result = None
-
-			rb = marshal.dumps ((e,result))
-			self.push (('%08x' % len(rb)) + rb)
 
+    'Simple RPC server'
+    
+    # a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm
+    # (hex length in 8 bytes, followed by marshal'd packet data)
+    # same protocol used in both directions.
+    
+    # A request consists of (<path-tuple>, <args-tuple>)
+    # where <path-tuple> is a list of strings (eqv to string.split ('a.b.c', '.'))
+    
+    STATE_LENGTH = 'length state'
+    STATE_PACKET = 'packet state'
+    
+    def __init__ (self, root, conn, addr):
+        self.root = root
+        self.addr = addr
+        asynchat.async_chat.__init__ (self, conn)
+        self.pstate = self.STATE_LENGTH
+        self.set_terminator (8)
+        self.buffer = []
+        
+    def log (*ignore):
+        pass
+        
+    def collect_incoming_data (self, data):
+        self.buffer.append (data)
+        
+    def found_terminator (self):
+        self.buffer, data = [], string.join (self.buffer, '')
+        
+        if self.pstate is self.STATE_LENGTH:
+            packet_length = string.atoi (data, 16)
+            self.set_terminator (packet_length)
+            self.pstate = self.STATE_PACKET
+        else:
+            self.set_terminator (8)
+            self.pstate = self.STATE_LENGTH
+            (path, params) = marshal.loads (data)
+            o = self.root
+            
+            e = None
+            
+            try:
+                for p in path:
+                    o = getattr (o, p)
+                result = apply (o, params)
+            except:
+                e = repr (asyncore.compact_traceback())
+                result = None
+                
+            rb = marshal.dumps ((e,result))
+            self.push (('%08x' % len(rb)) + rb)
+            
 class fastrpc_server (asyncore.dispatcher):
 
-	def __init__ (self, root, address = ('', 8748)):
-		self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-		self.set_reuse_addr()
-		self.bind (address)
-		self.listen (128)
-		self.root = root
-
-	def handle_accept (self):
-		conn, addr = self.accept()
-		fastrpc_channel (self.root, conn, addr)
-
-# ===========================================================================
-
+    def __init__ (self, root, address = ('', 8748)):
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind (address)
+        self.listen (128)
+        self.root = root
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        fastrpc_channel (self.root, conn, addr)
+
+# ===========================================================================
+
 if __name__ == '__main__':
-
-	class thing:
-		def __del__ (self):
-			print 'a thing has gone away %08x' % id(self)
-
-	class sample_calc:
-
-		def product (self, *values):
-			return reduce (lambda a,b: a*b, values, 1)
-
-		def sum (self, *values):
-			return reduce (lambda a,b: a+b, values, 0)
-
-		def eval (self, string):
-			return eval (string)
-
-		def make_a_thing (self):
-			return thing()
-
-	import sys
-
-	if '-f' in sys.argv:
-		server_class = fastrpc_server
-		address = ('', 8748)
-	else:
-		server_class = rpc_server
-		address = ('', 8746)
 
-	root = rpc_server_root()
-	root.calc = sample_calc()
-	root.sys = sys
-	rs = server_class (root, address)
-	asyncore.loop()
+    class thing:
+        def __del__ (self):
+            print 'a thing has gone away %08x' % id(self)
+            
+    class sample_calc:
+    
+        def product (self, *values):
+            return reduce (lambda a,b: a*b, values, 1)
+            
+        def sum (self, *values):
+            return reduce (lambda a,b: a+b, values, 0)
+            
+        def eval (self, string):
+            return eval (string)
+            
+        def make_a_thing (self):
+            return thing()
+            
+    import sys
+    
+    if '-f' in sys.argv:
+        server_class = fastrpc_server
+        address = ('', 8748)
+    else:
+        server_class = rpc_server
+        address = ('', 8746)
+        
+    root = rpc_server_root()
+    root.calc = sample_calc()
+    root.sys = sys
+    rs = server_class (root, address)
+    asyncore.loop()

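A note for anyone driving this protocol by hand: the '%08x' length prefix plus marshal payload is easy to speak from a plain blocking socket. The sketch below is illustrative only -- the helper name is made up, localhost:8748 just matches the fastrpc default above, and there is no timeout or error handling.

import socket
import string
import marshal

def fastrpc_call (host, port, path, params):
    # path is a tuple of attribute names, e.g. ('calc', 'sum')
    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    s.connect ((host, port))
    packet = marshal.dumps ((path, params))
    # 8 hex digits of length, then the marshal'd request
    s.send (('%08x' % len(packet)) + packet)
    # read the 8-byte length header of the reply...
    header = ''
    while len(header) < 8:
        header = header + s.recv (8 - len(header))
    length = string.atoi (header, 16)
    # ...then the reply body: a marshal'd (error-or-None, result) pair
    body = ''
    while len(body) < length:
        body = body + s.recv (length - len(body))
    s.close()
    return marshal.loads (body)

# against the __main__ demo above:
#   fastrpc_call ('localhost', 8748, ('calc', 'sum'), (1, 2, 3))
#   => (None, 6)
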
--- Updated File script_handler.py in package Zope2 --
--- script_handler.py	2001/04/25 19:07:34	1.2
+++ script_handler.py	2001/05/01 11:44:49	1.3
@@ -36,181 +36,181 @@
 
 class script_handler:
 
-	extension = 'mpy'
-	restricted = 0
-
-	script_regex = re.compile (
-		r'.*/([^/]+\.%s)' % extension,
-		re.IGNORECASE
-		)
-
-	def __init__ (self, filesystem):
-		self.filesystem = filesystem
-		self.hits = counter.counter()
-		self.exceptions = counter.counter()
-
-	def match (self, request):
-		[path, params, query, fragment] = request.split_uri()
-		m = self.script_regex.match (path)
-		return (m and (m.end() == len(path)))
-
-	def handle_request (self, request):
-		
-		[path, params, query, fragment] = split_path (request.uri)
-
-		while path and path[0] == '/':
-			path = path[1:]
-
-		if '%' in path:
-			path = unquote (path)
-
-		if not self.filesystem.isfile (path):
-			request.error (404)
-			return
-		else:
-
-			self.hits.increment()
-
-			request.script_filename = self.filesystem.translate (path)
-
-			if request.command in ('put', 'post'):
-				# look for a Content-Length header.
-				cl = request.get_header ('content-length')
-				length = int(cl)
-				if not cl:
-					request.error (411)
-				else:
-					collector (self, length, request)
-			else:
-				self.continue_request (
-					request,
-					StringIO.StringIO() # empty stdin
-					)
-
-	def continue_request (self, request, stdin):
-		temp_files = stdin, StringIO.StringIO(), StringIO.StringIO()
-		old_files = sys.stdin, sys.stdout, sys.stderr
-
-		if self.restricted:
-			r = rexec.RExec()
-
-		try:
-			sys.request = request
-			sys.stdin, sys.stdout, sys.stderr = temp_files
-			try:
-				if self.restricted:
-					r.s_execfile (request.script_filename)
-				else:
-					execfile (request.script_filename)
-				request.reply_code = 200
-			except:
-				request.reply_code = 500
-				self.exceptions.increment()
-		finally:
-			sys.stdin, sys.stdout, sys.stderr = old_files
-			del sys.request
-
-		i,o,e = temp_files
-
-		if request.reply_code != 200:
-			s = e.getvalue()
-		else:
-			s = o.getvalue()
-
-		request['Content-Length'] = len(s)
-		request.push (s)
-		request.done()
-
-	def status (self):
-		return producer.simple_producer (
-			'<li>Server-Side Script Handler'
-			+ '<ul>'
-			+ '  <li><b>Hits:</b> %s' % self.hits
-			+ '  <li><b>Exceptions:</b> %s' % self.exceptions
-			+ '</ul>'
-			)
-
-
+    extension = 'mpy'
+    restricted = 0
+    
+    script_regex = re.compile (
+            r'.*/([^/]+\.%s)' % extension,
+            re.IGNORECASE
+            )
+    
+    def __init__ (self, filesystem):
+        self.filesystem = filesystem
+        self.hits = counter.counter()
+        self.exceptions = counter.counter()
+        
+    def match (self, request):
+        [path, params, query, fragment] = request.split_uri()
+        m = self.script_regex.match (path)
+        return (m and (m.end() == len(path)))
+        
+    def handle_request (self, request):
+    
+        [path, params, query, fragment] = split_path (request.uri)
+        
+        while path and path[0] == '/':
+            path = path[1:]
+            
+        if '%' in path:
+            path = unquote (path)
+            
+        if not self.filesystem.isfile (path):
+            request.error (404)
+            return
+        else:
+        
+            self.hits.increment()
+            
+            request.script_filename = self.filesystem.translate (path)
+            
+            if request.command in ('put', 'post'):
+                # look for a Content-Length header.
+                cl = request.get_header ('content-length')
+                if not cl:
+                    request.error (411)
+                else:
+                    length = int(cl)
+                    collector (self, length, request)
+            else:
+                self.continue_request (
+                        request,
+                        StringIO.StringIO() # empty stdin
+                        )
+                
+    def continue_request (self, request, stdin):
+        temp_files = stdin, StringIO.StringIO(), StringIO.StringIO()
+        old_files = sys.stdin, sys.stdout, sys.stderr
+        
+        if self.restricted:
+            r = rexec.RExec()
+            
+        try:
+            sys.request = request
+            sys.stdin, sys.stdout, sys.stderr = temp_files
+            try:
+                if self.restricted:
+                    r.s_execfile (request.script_filename)
+                else:
+                    execfile (request.script_filename)
+                request.reply_code = 200
+            except:
+                request.reply_code = 500
+                self.exceptions.increment()
+        finally:
+            sys.stdin, sys.stdout, sys.stderr = old_files
+            del sys.request
+            
+        i,o,e = temp_files
+        
+        if request.reply_code != 200:
+            s = e.getvalue()
+        else:
+            s = o.getvalue()
+            
+        request['Content-Length'] = len(s)
+        request.push (s)
+        request.done()
+        
+    def status (self):
+        return producer.simple_producer (
+                '<li>Server-Side Script Handler'
+                + '<ul>'
+                + '  <li><b>Hits:</b> %s' % self.hits
+                + '  <li><b>Exceptions:</b> %s' % self.exceptions
+                + '</ul>'
+                )
+        
+        
 class persistent_script_handler:
-
-	def __init__ (self):
-		self.modules = {}
-		self.hits = counter.counter()
-		self.exceptions = counter.counter()
-
-	def add_module (self, name, module):
-		self.modules[name] = module
-
-	def del_module (self, name):
-		del self.modules[name]
-
-	def match (self, request):
-		[path, params, query, fragment] = request.split_uri()
-		parts = string.split (path, '/')
-		if (len(parts)>1) and self.modules.has_key (parts[1]):
-			module = self.modules[parts[1]]
-			request.module = module
-			return 1
-		else:
-			return 0
-
-	def handle_request (self, request):
-		if request.command in ('put', 'post'):
-			# look for a Content-Length header.
-			cl = request.get_header ('content-length')
-			length = int(cl)
-			if not cl:
-				request.error (411)
-			else:
-				collector (self, length, request)
-		else:
-			self.continue_request (request, StringIO.StringIO())
 
-	def continue_request (self, request, input_data):
-		temp_files = input_data, StringIO.StringIO(), StringIO.StringIO()
-		old_files = sys.stdin, sys.stdout, sys.stderr
-
-		try:
-			sys.stdin, sys.stdout, sys.stderr = temp_files
-			# provide a default
-			request['Content-Type'] = 'text/html'
-			try:
-				request.module.main (request)
-				request.reply_code = 200
-			except:
-				request.reply_code = 500
-				self.exceptions.increment()
-		finally:
-			sys.stdin, sys.stdout, sys.stderr = old_files
-
-		i,o,e = temp_files
-
-		if request.reply_code != 200:
-			s = e.getvalue()
-		else:
-			s = o.getvalue()
-
-		request['Content-Length'] = len(s)
-		request.push (s)
-		request.done()
-
+    def __init__ (self):
+        self.modules = {}
+        self.hits = counter.counter()
+        self.exceptions = counter.counter()
+        
+    def add_module (self, name, module):
+        self.modules[name] = module
+        
+    def del_module (self, name):
+        del self.modules[name]
+        
+    def match (self, request):
+        [path, params, query, fragment] = request.split_uri()
+        parts = string.split (path, '/')
+        if (len(parts)>1) and self.modules.has_key (parts[1]):
+            module = self.modules[parts[1]]
+            request.module = module
+            return 1
+        else:
+            return 0
+            
+    def handle_request (self, request):
+        if request.command in ('put', 'post'):
+                # look for a Content-Length header.
+            cl = request.get_header ('content-length')
+            length = int(cl)
+            if not cl:
+                request.error (411)
+            else:
+                collector (self, length, request)
+        else:
+            self.continue_request (request, StringIO.StringIO())
+            
+    def continue_request (self, request, input_data):
+        temp_files = input_data, StringIO.StringIO(), StringIO.StringIO()
+        old_files = sys.stdin, sys.stdout, sys.stderr
+        
+        try:
+            sys.stdin, sys.stdout, sys.stderr = temp_files
+            # provide a default
+            request['Content-Type'] = 'text/html'
+            try:
+                request.module.main (request)
+                request.reply_code = 200
+            except:
+                request.reply_code = 500
+                self.exceptions.increment()
+        finally:
+            sys.stdin, sys.stdout, sys.stderr = old_files
+            
+        i,o,e = temp_files
+        
+        if request.reply_code != 200:
+            s = e.getvalue()
+        else:
+            s = o.getvalue()
+            
+        request['Content-Length'] = len(s)
+        request.push (s)
+        request.done()
+        
 class collector:
-
-	def __init__ (self, handler, length, request):
-		self.handler = handler
-		self.request = request
-		self.request.collector = self
-		self.request.channel.set_terminator (length)
-		self.buffer = StringIO.StringIO()
-
-	def collect_incoming_data (self, data):
-		self.buffer.write (data)
 
-	def found_terminator (self):
-		self.buffer.seek(0)
-		self.request.collector = None
-		self.request.channel.set_terminator ('\r\n\r\n')
-		self.handler.continue_request (
-			self.request,
-			self.buffer
-			)
+    def __init__ (self, handler, length, request):
+        self.handler = handler
+        self.request = request
+        self.request.collector = self
+        self.request.channel.set_terminator (length)
+        self.buffer = StringIO.StringIO()
+        
+    def collect_incoming_data (self, data):
+        self.buffer.write (data)
+        
+    def found_terminator (self):
+        self.buffer.seek(0)
+        self.request.collector = None
+        self.request.channel.set_terminator ('\r\n\r\n')
+        self.handler.continue_request (
+                self.request,
+                self.buffer
+                )

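Worth spelling out what script_handler actually runs: continue_request() execfile()s the target with stdout captured as the response body and the request object exposed as sys.request. A minimal script under that contract might look like this (hello.mpy is a hypothetical name; any URI ending in .mpy matches):

# hello.mpy -- everything printed to stdout becomes the response body.
import sys

request = sys.request                    # put there by continue_request
request['Content-Type'] = 'text/html'

print '<html><body>'
print '<h1>hello from %s</h1>' % request.uri
print '</body></html>'
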
--- Updated File start_medusa.py in package Zope2 --
--- start_medusa.py	2001/04/25 19:07:34	1.2
+++ start_medusa.py	2001/05/01 11:44:49	1.3
@@ -26,18 +26,18 @@
 import asyncore
 
 if len(sys.argv) > 1:
-	# process a few convenient arguments
-	[HOSTNAME, IP_ADDRESS, PUBLISHING_ROOT] = sys.argv[1:]
+    # process a few convenient arguments
+    [HOSTNAME, IP_ADDRESS, PUBLISHING_ROOT] = sys.argv[1:]
 else:
-	HOSTNAME			= 'www.nightmare.com'
-	# This is the IP address of the network interface you want
-	# your servers to be visible from.  This can be changed to ''
-	# to listen on all interfaces.
-	IP_ADDRESS			= '205.160.176.5'
-
-	# Root of the http and ftp server's published filesystems.
-	PUBLISHING_ROOT		= '/home/www'
-
+    HOSTNAME			= 'www.nightmare.com'
+    # This is the IP address of the network interface you want
+    # your servers to be visible from.  This can be changed to ''
+    # to listen on all interfaces.
+    IP_ADDRESS			= '205.160.176.5'
+    
+    # Root of the http and ftp server's published filesystems.
+    PUBLISHING_ROOT		= '/home/www'
+    
 HTTP_PORT		= 8080 # The standard port is 80
 FTP_PORT		= 8021 # The standard port is 21
 CHAT_PORT		= 8888
@@ -111,27 +111,27 @@
 # Unix user `public_html' directory support
 # ===========================================================================
 if os.name == 'posix':
-	import unix_user_handler
-	uh = unix_user_handler.unix_user_handler ('public_html')
-	hs.install_handler (uh)
-
-# ===========================================================================
-# FTP Server
-# ===========================================================================
-
-# Here we create an 'anonymous' ftp server.
-# Note: the ftp server is read-only by default. [in this mode, all
-# 'write-capable' commands are unavailable]
-
+    import unix_user_handler
+    uh = unix_user_handler.unix_user_handler ('public_html')
+    hs.install_handler (uh)
+
+# ===========================================================================
+# FTP Server
+# ===========================================================================
+
+# Here we create an 'anonymous' ftp server.
+# Note: the ftp server is read-only by default. [in this mode, all
+# 'write-capable' commands are unavailable]
+
 ftp = ftp_server.ftp_server (
-	ftp_server.anon_authorizer (
-		PUBLISHING_ROOT
-		),
-	ip=IP_ADDRESS,
-	port=FTP_PORT,
-	resolver=rs,
-	logger_object=lg
-	)
+        ftp_server.anon_authorizer (
+                PUBLISHING_ROOT
+                ),
+        ip=IP_ADDRESS,
+        port=FTP_PORT,
+        resolver=rs,
+        logger_object=lg
+        )
 
 # ===========================================================================
 # Monitor Server:
@@ -169,13 +169,13 @@
 # description of the status of the object.
 
 status_objects = [
-	hs,
-	ftp,
-	ms,
-	cs,
-	rs,
-	lg
-	]
+        hs,
+        ftp,
+        ms,
+        cs,
+        rs,
+        lg
+        ]
 
 # Create a status handler.  By default it binds to the URI '/status'...
 sh = status_handler.status_extension(status_objects)
@@ -184,15 +184,15 @@
 
 # become 'nobody'
 if os.name == 'posix':
-	import os
-	if hasattr (os, 'seteuid'):
-		# look in ~medusa/patches for {set,get}euid.
-		import pwd
-		[uid, gid] = pwd.getpwnam ('nobody')[2:4]
-		os.setegid (gid)
-		os.seteuid (uid)
-
-# Finally, start up the server loop!  This loop will not exit until
-# all clients and servers are closed.  You may cleanly shut the system
-# down by sending SIGINT (a.k.a. KeyboardInterrupt).
+    import os
+    if hasattr (os, 'seteuid'):
+        # look in ~medusa/patches for {set,get}euid.
+        import pwd
+        [uid, gid] = pwd.getpwnam ('nobody')[2:4]
+        os.setegid (gid)
+        os.seteuid (uid)
+
+# Finally, start up the server loop!  This loop will not exit until
+# all clients and servers are closed.  You may cleanly shut the system
+# down by sending SIGINT (a.k.a. KeyboardInterrupt).
 asyncore.loop()

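For comparison, the smallest useful subset of the setup above is an http_server plus a default_handler over an os_filesystem. A sketch, with an illustrative root and port, and none of the ftp/monitor/chat/status machinery:

import asyncore
import http_server
import default_handler
import filesys

PUBLISHING_ROOT = '/home/www'   # illustrative
HTTP_PORT       = 8080

fs = filesys.os_filesystem (PUBLISHING_ROOT)
hs = http_server.http_server ('', HTTP_PORT)
hs.install_handler (default_handler.default_handler (fs))

asyncore.loop()
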
--- Updated File status_handler.py in package Zope2 --
--- status_handler.py	2001/04/25 19:07:34	1.6
+++ status_handler.py	2001/05/01 11:44:49	1.7
@@ -19,264 +19,264 @@
 START_TIME = long(time.time())
 
 class status_extension:
-	hit_counter = counter()
-
-	def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
-		self.objects = objects
-		self.statusdir = statusdir
-		self.allow_emergency_debug = allow_emergency_debug
-		# We use /status instead of statusdir here because it's too
-		# hard to pass statusdir to the logger, who makes the HREF
-		# to the object dir.  We don't need the security-through-
-		# obscurity here in any case, because the id is obscurity enough
-		self.hyper_regex = re.compile('/status/object/([0-9]+)/.*')
-		self.hyper_objects = []
-		for object in objects:
-			self.register_hyper_object (object)
-
-	def __repr__ (self):
-		return '<Status Extension (%s hits) at %x>' % (
-			self.hit_counter,
-			id(self)
-			)
-
-	def match (self, request):
-		path, params, query, fragment = request.split_uri()
-		# For reasons explained above, we don't use statusdir for /object
-		return (path[:len(self.statusdir)] == self.statusdir or
-				path[:len("/status/object/")] == '/status/object/')
-
-	# Possible Targets:
-	# /status
-	# /status/channel_list
-	# /status/medusa.gif
-
-	# can we have 'clickable' objects?
-	# [yes, we can use id(x) and do a linear search]
-
-	# Dynamic producers:
-	# HTTP/1.0: we must close the channel, because it's dynamic output
-	# HTTP/1.1: we can use the chunked transfer-encoding, and leave
-	#   it open.
-
-	def handle_request (self, request):
-		[path, params, query, fragment] = split_path (request.uri)
-		self.hit_counter.increment()
-		if path == self.statusdir:          # and not a subdirectory
-			up_time = string.join (english_time (long(time.time()) - START_TIME))
-			request['Content-Type'] = 'text/html'
-			request.push (
-				'<html>'
-				'<title>Medusa Status Reports</title>'
-				'<body bgcolor="#ffffff">'
-				'<h1>Medusa Status Reports</h1>'
-				'<b>Up:</b> %s' % up_time
-				)
-			for i in range(len(self.objects)):
-				request.push (self.objects[i].status())
-				request.push ('<hr>\r\n')
-			request.push (
-				'<p><a href="%s/channel_list">Channel List</a>'
-				'<hr>'
-				'<img src="%s/medusa.gif" align=right width=%d height=%d>'
-				'</body></html>' % (
-					self.statusdir,
-					self.statusdir,
-					medusa_gif.width,
-					medusa_gif.height
-					)
-				)
-			request.done()
-		elif path == self.statusdir + '/channel_list':
-			request['Content-Type'] = 'text/html'
-			request.push ('<html><body>')
-			request.push(channel_list_producer(self.statusdir))
-			request.push (
-				'<hr>'
-				'<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
-					self.statusdir,
-					medusa_gif.width, 
-					medusa_gif.height
-					) +
-				'</body></html>'
-				)
-			request.done()
-
-		elif path == self.statusdir + '/medusa.gif':
-			request['Content-Type'] = 'image/gif'
-			request['Content-Length'] = len(medusa_gif.data)
-			request.push (medusa_gif.data)
-			request.done()
-
-		elif path == self.statusdir + '/close_zombies':
-			message = (
-				'<h2>Closing all zombie http client connections...</h2>'
-				'<p><a href="%s">Back to the status page</a>' % self.statusdir
-				)
-			request['Content-Type'] = 'text/html'
-			request['Content-Length'] = len (message)
-			request.push (message)
-			now = int (time.time())
-			for channel in asyncore.socket_map.keys():
-				if channel.__class__ == http_server.http_channel:
-					if channel != request.channel:
-						if (now - channel.creation_time) > channel.zombie_timeout:
-							channel.close()
-			request.done()
-
-		# Emergency Debug Mode
-		# If a server is running away from you, don't KILL it!
-		# Move all the AF_INET server ports and perform an autopsy...
-		# [disabled by default to protect the innocent]
-		elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
-			request.push ('<html>Moving All Servers...</html>')
-			request.done()
-			for channel in asyncore.socket_map.keys():
-				if channel.accepting:
-					if type(channel.addr) is type(()):
-						ip, port = channel.addr
-						channel.socket.close()
-						channel.del_channel()
-						channel.addr = (ip, port+10000)
-						fam, typ = channel.family_and_type
-						channel.create_socket (fam, typ)
-						channel.set_reuse_addr()
-						channel.bind (channel.addr)
-						channel.listen(5)
-
-		else:
-			m = self.hyper_regex.match (path)
-			if m:
-				oid = string.atoi (m.group (1))
-				for object in self.hyper_objects:
-					if id (object) == oid:
-						if hasattr (object, 'hyper_respond'):
-							object.hyper_respond (self, path, request)
-			else:
-				request.error (404)
-				return
-
-	def status (self):
-		return producers.simple_producer (
-			'<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
-			)
-
-	def register_hyper_object (self, object):
-		if not object in self.hyper_objects:
-			self.hyper_objects.append (object)
-
+    hit_counter = counter()
+    
+    def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
+        self.objects = objects
+        self.statusdir = statusdir
+        self.allow_emergency_debug = allow_emergency_debug
+        # We use /status instead of statusdir here because it's too
+        # hard to pass statusdir to the logger, who makes the HREF
+        # to the object dir.  We don't need the security-through-
+        # obscurity here in any case, because the id is obscurity enough
+        self.hyper_regex = re.compile('/status/object/([0-9]+)/.*')
+        self.hyper_objects = []
+        for object in objects:
+            self.register_hyper_object (object)
+            
+    def __repr__ (self):
+        return '<Status Extension (%s hits) at %x>' % (
+                self.hit_counter,
+                id(self)
+                )
+        
+    def match (self, request):
+        path, params, query, fragment = request.split_uri()
+        # For reasons explained above, we don't use statusdir for /object
+        return (path[:len(self.statusdir)] == self.statusdir or
+                        path[:len("/status/object/")] == '/status/object/')
+
+    # Possible Targets:
+    # /status
+    # /status/channel_list
+    # /status/medusa.gif
+
+    # can we have 'clickable' objects?
+    # [yes, we can use id(x) and do a linear search]
+
+    # Dynamic producers:
+    # HTTP/1.0: we must close the channel, because it's dynamic output
+    # HTTP/1.1: we can use the chunked transfer-encoding, and leave
+    #   it open.
+
+    def handle_request (self, request):
+        [path, params, query, fragment] = split_path (request.uri)
+        self.hit_counter.increment()
+        if path == self.statusdir:          # and not a subdirectory
+            up_time = string.join (english_time (long(time.time()) - START_TIME))
+            request['Content-Type'] = 'text/html'
+            request.push (
+                    '<html>'
+                    '<title>Medusa Status Reports</title>'
+                    '<body bgcolor="#ffffff">'
+                    '<h1>Medusa Status Reports</h1>'
+                    '<b>Up:</b> %s' % up_time
+                    )
+            for i in range(len(self.objects)):
+                request.push (self.objects[i].status())
+                request.push ('<hr>\r\n')
+            request.push (
+                    '<p><a href="%s/channel_list">Channel List</a>'
+                    '<hr>'
+                    '<img src="%s/medusa.gif" align=right width=%d height=%d>'
+                    '</body></html>' % (
+                            self.statusdir,
+                            self.statusdir,
+                            medusa_gif.width,
+                            medusa_gif.height
+                            )
+                    )
+            request.done()
+        elif path == self.statusdir + '/channel_list':
+            request['Content-Type'] = 'text/html'
+            request.push ('<html><body>')
+            request.push(channel_list_producer(self.statusdir))
+            request.push (
+                    '<hr>'
+                    '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
+                            self.statusdir,
+                            medusa_gif.width, 
+                            medusa_gif.height
+                            ) +
+                    '</body></html>'
+                    )
+            request.done()
+            
+        elif path == self.statusdir + '/medusa.gif':
+            request['Content-Type'] = 'image/gif'
+            request['Content-Length'] = len(medusa_gif.data)
+            request.push (medusa_gif.data)
+            request.done()
+            
+        elif path == self.statusdir + '/close_zombies':
+            message = (
+                    '<h2>Closing all zombie http client connections...</h2>'
+                    '<p><a href="%s">Back to the status page</a>' % self.statusdir
+                    )
+            request['Content-Type'] = 'text/html'
+            request['Content-Length'] = len (message)
+            request.push (message)
+            now = int (time.time())
+            for channel in asyncore.socket_map.keys():
+                if channel.__class__ == http_server.http_channel:
+                    if channel != request.channel:
+                        if (now - channel.creation_time) > channel.zombie_timeout:
+                            channel.close()
+            request.done()
+
+        # Emergency Debug Mode
+        # If a server is running away from you, don't KILL it!
+        # Move all the AF_INET server ports and perform an autopsy...
+        # [disabled by default to protect the innocent]
+        elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
+            request.push ('<html>Moving All Servers...</html>')
+            request.done()
+            for channel in asyncore.socket_map.keys():
+                if channel.accepting:
+                    if type(channel.addr) is type(()):
+                        ip, port = channel.addr
+                        channel.socket.close()
+                        channel.del_channel()
+                        channel.addr = (ip, port+10000)
+                        fam, typ = channel.family_and_type
+                        channel.create_socket (fam, typ)
+                        channel.set_reuse_addr()
+                        channel.bind (channel.addr)
+                        channel.listen(5)
+                        
+        else:
+            m = self.hyper_regex.match (path)
+            if m:
+                oid = string.atoi (m.group (1))
+                for object in self.hyper_objects:
+                    if id (object) == oid:
+                        if hasattr (object, 'hyper_respond'):
+                            object.hyper_respond (self, path, request)
+            else:
+                request.error (404)
+                return
+                
+    def status (self):
+        return producers.simple_producer (
+                '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
+                )
+        
+    def register_hyper_object (self, object):
+        if not object in self.hyper_objects:
+            self.hyper_objects.append (object)
+            
 import logger
 
 class logger_for_status (logger.tail_logger):
 
-	def status (self):
-		return 'Last %d log entries for: %s' % (
-			len (self.messages),
-			html_repr (self)
-			)
-
-	def hyper_respond (self, sh, path, request):
-		request['Content-Type'] = 'text/plain'
-		messages = self.messages[:]
-		messages.reverse()
-		request.push (lines_producer (messages))
-		request.done()
-
+    def status (self):
+        return 'Last %d log entries for: %s' % (
+                len (self.messages),
+                html_repr (self)
+                )
+        
+    def hyper_respond (self, sh, path, request):
+        request['Content-Type'] = 'text/plain'
+        messages = self.messages[:]
+        messages.reverse()
+        request.push (lines_producer (messages))
+        request.done()
+        
 class lines_producer:
-	def __init__ (self, lines):
-		self.lines = lines
-
-	def ready (self):
-		return len(self.lines)
-
-	def more (self):
-		if self.lines:
-			chunk = self.lines[:50]
-			self.lines = self.lines[50:]
-			return string.join (chunk, '\r\n') + '\r\n'
-		else:
-			return ''
-
+    def __init__ (self, lines):
+        self.lines = lines
+        
+    def ready (self):
+        return len(self.lines)
+        
+    def more (self):
+        if self.lines:
+            chunk = self.lines[:50]
+            self.lines = self.lines[50:]
+            return string.join (chunk, '\r\n') + '\r\n'
+        else:
+            return ''
+            
 class channel_list_producer (lines_producer):
-	def __init__ (self, statusdir):
-		channel_reprs = map (
-			lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
-			asyncore.socket_map.values()
-			)
-		channel_reprs.sort()
-		lines_producer.__init__ (
-			self,
-			['<h1>Active Channel List</h1>',
-			 '<pre>'
-			 ] + channel_reprs + [
-				 '</pre>',
-				 '<p><a href="%s">Status Report</a>' % statusdir
-				 ]
-			)
-
-
-# this really needs a full-blown quoter...
+    def __init__ (self, statusdir):
+        channel_reprs = map (
+                lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
+                asyncore.socket_map.values()
+                )
+        channel_reprs.sort()
+        lines_producer.__init__ (
+                self,
+                ['<h1>Active Channel List</h1>',
+                 '<pre>'
+                 ] + channel_reprs + [
+                         '</pre>',
+                         '<p><a href="%s">Status Report</a>' % statusdir
+                         ]
+                )
+        
+
+# this really needs a full-blown quoter...
 def sanitize (s):
-	if '<' in s:
-		s = string.join (string.split (s, '<'), '&lt;')
-	if '>' in s:
-		s = string.join (string.split (s, '>'), '&gt;')
-	return s
-
+    if '<' in s:
+        s = string.join (string.split (s, '<'), '&lt;')
+    if '>' in s:
+        s = string.join (string.split (s, '>'), '&gt;')
+    return s
+    
 def html_repr (object):
-	so = sanitize (repr (object))
-	if hasattr (object, 'hyper_respond'):
-		return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
-	else:
-		return so
-
+    so = sanitize (repr (object))
+    if hasattr (object, 'hyper_respond'):
+        return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
+    else:
+        return so
+        
 def html_reprs (list, front='', back=''):
-	reprs = map (
-		lambda x,f=front,b=back: '%s%s%s' % (f,x,b),
-		map (lambda x: sanitize (html_repr(x)), list)
-		)
-	reprs.sort()
-	return reprs
-
-# for example, tera, giga, mega, kilo
-# p_d (n, (1024, 1024, 1024, 1024))
-# smallest divider goes first - for example
-# minutes, hours, days
-# p_d (n, (60, 60, 24))
-
+    reprs = map (
+            lambda x,f=front,b=back: '%s%s%s' % (f,x,b),
+            map (lambda x: sanitize (html_repr(x)), list)
+            )
+    reprs.sort()
+    return reprs
+
+# for example, tera, giga, mega, kilo
+# p_d (n, (1024, 1024, 1024, 1024))
+# smallest divider goes first - for example
+# minutes, hours, days
+# p_d (n, (60, 60, 24))
+
 def progressive_divide (n, parts):
-	result = []
-	for part in parts:
-		n, rem = divmod (n, part)
-		result.append (rem)
-	result.append (n)
-	return result
-
-# b,k,m,g,t
+    result = []
+    for part in parts:
+        n, rem = divmod (n, part)
+        result.append (rem)
+    result.append (n)
+    return result
+
+# b,k,m,g,t
 def split_by_units (n, units, dividers, format_string):
-	divs = progressive_divide (n, dividers)
-	result = []
-	for i in range(len(units)):
-		if divs[i]:
-			result.append (format_string % (divs[i], units[i]))
-	result.reverse()
-	if not result:
-		return [format_string % (0, units[0])]
-	else:
-		return result
-
+    divs = progressive_divide (n, dividers)
+    result = []
+    for i in range(len(units)):
+        if divs[i]:
+            result.append (format_string % (divs[i], units[i]))
+    result.reverse()
+    if not result:
+        return [format_string % (0, units[0])]
+    else:
+        return result
+        
 def english_bytes (n):
-	return split_by_units (
-		n,
-		('','K','M','G','T'),
-		(1024, 1024, 1024, 1024, 1024),
-		'%d %sB'
-		)
-
+    return split_by_units (
+            n,
+            ('','K','M','G','T'),
+            (1024, 1024, 1024, 1024, 1024),
+            '%d %sB'
+            )
+    
 def english_time (n):
-	return split_by_units (
-		n,
-		('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
-		(         60,     60,      24,     7,       52),
-		'%d %s'
-		)
+    return split_by_units (
+            n,
+            ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
+            (         60,     60,      24,     7,       52),
+            '%d %s'
+            )

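A worked example of the unit helpers, since the smallest-divider-first convention is easy to get backwards (values computed by hand):

import status_handler

# 3700 secs = 1*3600 + 1*60 + 40, so with dividers (60, 60, 24, 7, 52)
# progressive_divide (3700, ...) => [40, 1, 1, 0, 0, 0]
print status_handler.english_time (3700)
# => ['1 hours', '1 mins', '40 secs']

print status_handler.english_bytes (1048576)
# => ['1 MB']
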
--- Updated File unix_user_handler.py in package Zope2 --
--- unix_user_handler.py	2001/04/25 19:07:34	1.2
+++ unix_user_handler.py	2001/05/01 11:44:49	1.3
@@ -22,59 +22,59 @@
 
 class unix_user_handler (default_handler.default_handler):
 
-	def __init__ (self, public_html = 'public_html'):
-		self.public_html = public_html
-		default_handler.default_handler.__init__ (self, None)
-
-	# cache userdir-filesystem objects
-	fs_cache = {}
-
-	def match (self, request):
-		m = user_dir.match (request.uri)
-		return m and (m.end() == len (request.uri))
-			
-	def handle_request (self, request):
-		# get the user name
-		user = user_dir.group(1)
-		rest = user_dir.group(2)
-
-		# special hack to catch those lazy URL typers
-		if not rest:
-			request['Location'] = 'http://%s/~%s/' % (
-				request.channel.server.server_name,
-				user
-				)
-			request.error (301)
-			return 
-
-		# have we already built a userdir fs for this user?
-		if self.fs_cache.has_key (user):
-			fs = self.fs_cache[user]
-		else:
-			# no, well then, let's build one.
-			# first, find out where the user directory is
-			try:
-				info = pwd.getpwnam (user)
-			except KeyError:
-				request.error (404)
-				return
-			ud = info[5] + '/' + self.public_html
-			if os.path.isdir (ud):
-				fs = filesys.os_filesystem (ud)
-				self.fs_cache[user] = fs
-			else:
-				request.error (404)
-				return
-
-		# fake out default_handler
-		self.filesystem = fs
-		# massage the request URI
-		request.uri = '/' + rest
-		return default_handler.default_handler.handle_request (self, request)
-
-	def __repr__ (self):
-		return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (
-			id(self),
-			self.public_html,
-			len(self.fs_cache)
-			)
+    def __init__ (self, public_html = 'public_html'):
+        self.public_html = public_html
+        default_handler.default_handler.__init__ (self, None)
+
+    # cache userdir-filesystem objects
+    fs_cache = {}
+    
+    def match (self, request):
+        m = user_dir.match (request.uri)
+        return m and (m.end() == len (request.uri))
+        
+    def handle_request (self, request):
+        # get the user name
+        user = user_dir.group(1)
+        rest = user_dir.group(2)
+        
+        # special hack to catch those lazy URL typers
+        if not rest:
+            request['Location'] = 'http://%s/~%s/' % (
+                    request.channel.server.server_name,
+                    user
+                    )
+            request.error (301)
+            return 
+
+        # have we already built a userdir fs for this user?
+        if self.fs_cache.has_key (user):
+            fs = self.fs_cache[user]
+        else:
+            # no, well then, let's build one.
+            # first, find out where the user directory is
+            try:
+                info = pwd.getpwnam (user)
+            except KeyError:
+                request.error (404)
+                return
+            ud = info[5] + '/' + self.public_html
+            if os.path.isdir (ud):
+                fs = filesys.os_filesystem (ud)
+                self.fs_cache[user] = fs
+            else:
+                request.error (404)
+                return
+
+        # fake out default_handler
+        self.filesystem = fs
+        # massage the request URI
+        request.uri = '/' + rest
+        return default_handler.default_handler.handle_request (self, request)
+        
+    def __repr__ (self):
+        return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (
+                id(self),
+                self.public_html,
+                len(self.fs_cache)
+                )

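To spell out the mapping handle_request() performs (the user 'alice' and her home directory are purely illustrative):

#   GET /~alice          -> 301 redirect to http://<server>/~alice/
#   GET /~alice/a.html   -> <home>/public_html/a.html, where <home>
#                           comes from the pwd database:
import pwd

info = pwd.getpwnam ('alice')           # hypothetical user
print info[5] + '/' + 'public_html'     # e.g. /home/alice/public_html
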
--- Updated File virtual_handler.py in package Zope2 --
--- virtual_handler.py	2001/04/25 19:07:34	1.2
+++ virtual_handler.py	2001/05/01 11:44:49	1.3
@@ -10,51 +10,51 @@
 
 class virtual_handler:
 
-	"""HTTP request handler for an HTTP/1.0-style virtual host.  Each
-	Virtual host must have a different IP"""
-
-	def __init__ (self, handler, hostname):
-		self.handler = handler
-		self.hostname = hostname
-		try:
-			self.ip = socket.gethostbyname (hostname)
-		except socket.error:
-			raise ValueError, "Virtual Hostname %s does not appear to be registered in the DNS" % hostname
-
-	def match (self, request):
-		if (request.channel.addr[0] == self.ip):
-			return 1
-		else:
-			return 0
-
-	def handle_request (self, request):
-		return self.handler.handle_request (request)
-
-	def __repr__ (self):
-		return '<virtual request handler for %s>' % self.hostname
-
-
+    """HTTP request handler for an HTTP/1.0-style virtual host.  Each
+    Virtual host must have a different IP"""
+    
+    def __init__ (self, handler, hostname):
+        self.handler = handler
+        self.hostname = hostname
+        try:
+            self.ip = socket.gethostbyname (hostname)
+        except socket.error:
+            raise ValueError, "Virtual Hostname %s does not appear to be registered in the DNS" % hostname
+            
+    def match (self, request):
+        if (request.channel.addr[0] == self.ip):
+            return 1
+        else:
+            return 0
+            
+    def handle_request (self, request):
+        return self.handler.handle_request (request)
+        
+    def __repr__ (self):
+        return '<virtual request handler for %s>' % self.hostname
+        
+        
 class virtual_handler_with_host:
-
-	"""HTTP request handler for HTTP/1.1-style virtual hosts.  This
-	matches by checking the value of the 'Host' header in the request.
-	You actually don't _have_ to support HTTP/1.1 to use this, since
-	many browsers now send the 'Host' header.  This is a Good Thing."""
-
-	def __init__ (self, handler, hostname):
-		self.handler = handler
-		self.hostname = hostname
-
-	def match (self, request):
-		host = get_header (HOST, request.header)
-		if host == self.hostname:
-			return 1
-		else:
-			return 0
-		
-	def handle_request (self, request):
-		return self.handler.handle_request (request)
-
-	def __repr__ (self):
-		return '<virtual request handler for %s>' % self.hostname
 
+    """HTTP request handler for HTTP/1.1-style virtual hosts.  This
+    matches by checking the value of the 'Host' header in the request.
+    You actually don't _have_ to support HTTP/1.1 to use this, since
+    many browsers now send the 'Host' header.  This is a Good Thing."""
+    
+    def __init__ (self, handler, hostname):
+        self.handler = handler
+        self.hostname = hostname
+        
+    def match (self, request):
+        host = get_header (HOST, request.header)
+        if host == self.hostname:
+            return 1
+        else:
+            return 0
+            
+    def handle_request (self, request):
+        return self.handler.handle_request (request)
+        
+    def __repr__ (self):
+        return '<virtual request handler for %s>' % self.hostname
+        

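Wiring either class up looks like any other handler install. A Host-based sketch (hostnames, roots, and the inner handlers are all illustrative):

import asyncore
import http_server
import default_handler
import filesys
import virtual_handler

hs = http_server.http_server ('', 8080)

main = default_handler.default_handler (filesys.os_filesystem ('/home/www'))
alt  = default_handler.default_handler (filesys.os_filesystem ('/home/www-alt'))

hs.install_handler (virtual_handler.virtual_handler_with_host (main, 'www.example.com'))
hs.install_handler (virtual_handler.virtual_handler_with_host (alt,  'alt.example.com'))

asyncore.loop()
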
--- Updated File xmlrpc_handler.py in package Zope2 --
--- xmlrpc_handler.py	2001/04/25 19:07:34	1.2
+++ xmlrpc_handler.py	2001/05/01 11:44:49	1.3
@@ -15,90 +15,90 @@
 
 class xmlrpc_handler:
 
-	def match (self, request):
-		# Note: /RPC2 is not required by the spec, so you may override this method.
-		if request.uri[:5] == '/RPC2':
-			return 1
-		else:
-			return 0
-
-	def handle_request (self, request):
-		[path, params, query, fragment] = request.split_uri()
-		
-		if request.command in ('post', 'put'):
-			request.collector = collector (self, request)
-		else:
-			request.error (400)
-
-	def continue_request (self, data, request):
-		params, method = xmlrpclib.loads (data)
-		try:
-			# generate response
-			try:
-				response = self.call (method, params)
-				if type(response) != type(()):
-					response = (response,)
-			except:
-				# report exception back to server
-				response = xmlrpclib.dumps (
-					xmlrpclib.Fault (1, "%s:%s" % (sys.exc_type, sys.exc_value))
-					)
-			else:
-				response = xmlrpclib.dumps (response, methodresponse=1)
-		except:
-			# internal error, report as HTTP server error
-			request.error (500)
-		else:
-			# got a valid XML RPC response
-			request['Content-Type'] = 'text/xml'
-			request.push (response)
-			request.done()
-
-	def call (self, method, params):
-		# override this method to implement RPC methods
-		raise "NotYetImplemented"
-
+    def match (self, request):
+        # Note: /RPC2 is not required by the spec, so you may override this method.
+        if request.uri[:5] == '/RPC2':
+            return 1
+        else:
+            return 0
+            
+    def handle_request (self, request):
+        [path, params, query, fragment] = request.split_uri()
+        
+        if request.command in ('post', 'put'):
+            request.collector = collector (self, request)
+        else:
+            request.error (400)
+            
+    def continue_request (self, data, request):
+        params, method = xmlrpclib.loads (data)
+        try:
+            # generate response
+            try:
+                response = self.call (method, params)
+                if type(response) != type(()):
+                    response = (response,)
+            except:
+                # report exception back to server
+                response = xmlrpclib.dumps (
+                        xmlrpclib.Fault (1, "%s:%s" % (sys.exc_type, sys.exc_value))
+                        )
+            else:
+                response = xmlrpclib.dumps (response, methodresponse=1)
+        except:
+            # internal error, report as HTTP server error
+            request.error (500)
+        else:
+            # got a valid XML RPC response
+            request['Content-Type'] = 'text/xml'
+            request.push (response)
+            request.done()
+            
+    def call (self, method, params):
+        # override this method to implement RPC methods
+        raise "NotYetImplemented"
+        
 class collector:
-
-	"gathers input for POST and PUT requests"
-
-	def __init__ (self, handler, request):
-
-		self.handler = handler
-		self.request = request
-		self.data = ''
 
-		# make sure there's a content-length header
-		cl = request.get_header ('content-length')
-
-		if not cl:
-			request.error (411)
-		else:
-			cl = string.atoi (cl)
-			# using a 'numeric' terminator
-			self.request.channel.set_terminator (cl)
-
-	def collect_incoming_data (self, data):
-		self.data = self.data + data
-
-	def found_terminator (self):
-		# set the terminator back to the default
-		self.request.channel.set_terminator ('\r\n\r\n')
-		self.handler.continue_request (self.data, self.request)
-
+    "gathers input for POST and PUT requests"
+    
+    def __init__ (self, handler, request):
+    
+        self.handler = handler
+        self.request = request
+        self.data = ''
+        
+        # make sure there's a content-length header
+        cl = request.get_header ('content-length')
+        
+        if not cl:
+            request.error (411)
+        else:
+            cl = string.atoi (cl)
+            # using a 'numeric' terminator
+            self.request.channel.set_terminator (cl)
+            
+    def collect_incoming_data (self, data):
+        self.data = self.data + data
+        
+    def found_terminator (self):
+        # set the terminator back to the default
+        self.request.channel.set_terminator ('\r\n\r\n')
+        self.handler.continue_request (self.data, self.request)
+        
 if __name__ == '__main__':
-
-	class rpc_demo (xmlrpc_handler):
-		
-		def call (self, method, params):
-			print 'method="%s" params=%s' % (method, params)
-			return "Sure, that works"
-
-	import asyncore
-	import http_server
 
-	hs = http_server.http_server ('', 8000)
-	rpc = rpc_demo()
-	hs.install_handler (rpc)
-	
-	asyncore.loop()
+    class rpc_demo (xmlrpc_handler):
+    
+        def call (self, method, params):
+            print 'method="%s" params=%s' % (method, params)
+            return "Sure, that works"
+            
+    import asyncore
+    import http_server
+    
+    hs = http_server.http_server ('', 8000)
+    rpc = rpc_demo()
+    hs.install_handler (rpc)
+    
+    asyncore.loop()
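
The client side of the demo above is plain xmlrpclib; rpc_demo.call() accepts any method name, so the method used here is arbitrary:

import xmlrpclib

# assumes the __main__ demo above is running on localhost:8000
server = xmlrpclib.Server ('http://localhost:8000/RPC2')
print server.echo ('hello')
# => Sure, that works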