[Zope-Checkins] CVS: Zope/lib/python/ZServer/medusa - __init__.py:1.9 chat_server.py:1.5 counter.py:1.9 default_handler.py:1.9 event_loop.py:1.5 fifo.py:1.5 filesys.py:1.13 ftp_server.py:1.23 http_bobo.py:1.6 http_date.py:1.11 http_server.py:1.35 logger.py:1.18 m_syslog.py:1.14 medusa.html:1.4 medusa_gif.py:1.8 mime_type_table.py:1.8 monitor.py:1.15 monitor_client.py:1.10 monitor_client_win32.py:1.9 producers.py:1.12 put_handler.py:1.5 redirecting_handler.py:1.5 resolver.py:1.12 status_handler.py:1.9

Fred L. Drake, Jr. fred@zope.com
Tue, 18 Mar 2003 16:15:49 -0500


Update of /cvs-repository/Zope/lib/python/ZServer/medusa
In directory cvs.zope.org:/tmp/cvs-serv23589/ZServer/medusa

Added Files:
	__init__.py chat_server.py counter.py default_handler.py 
	event_loop.py fifo.py filesys.py ftp_server.py http_bobo.py 
	http_date.py http_server.py logger.py m_syslog.py medusa.html 
	medusa_gif.py mime_type_table.py monitor.py monitor_client.py 
	monitor_client_win32.py producers.py put_handler.py 
	redirecting_handler.py resolver.py status_handler.py 
Log Message:
Move ZServer into new location, including configuration support from the
new-install-branch.


=== Zope/lib/python/ZServer/medusa/__init__.py 1.8 => 1.9 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/__init__.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,3 @@
+# Make medusa into a package
+
+__version__='$Revision$'[11:-2]


=== Zope/lib/python/ZServer/medusa/chat_server.py 1.4 => 1.5 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/chat_server.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,150 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1997-2000 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID = '$Id$'
+
+import string
+
+# guard against an unexpanded RCS keyword, as http_server.py does below
+if RCS_ID.startswith('$Id: '):
+    VERSION = string.split(RCS_ID)[2]
+else:
+    VERSION = '0.0'
+
+import socket
+import asyncore
+import asynchat
+import status_handler
+
+class chat_channel (asynchat.async_chat):
+
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        self.nick = None
+        self.push ('nickname?: ')
+        
+    def collect_incoming_data (self, data):
+        self.data = self.data + data
+        
+    def found_terminator (self):
+        line = self.data
+        self.data = ''
+        if self.nick is None:
+            self.nick = string.split (line)[0]
+            if not self.nick:
+                self.nick = None
+                self.push ('huh? gimmee a nickname: ')
+            else:
+                self.greet()
+        else:
+            if not line:
+                pass
+            elif line[0] != '/':
+                self.server.push_line (self, line)
+            else:
+                self.handle_command (line)
+                
+    def greet (self):
+        self.push ('Hello, %s\r\n' % self.nick)
+        num_channels = len(self.server.channels)-1
+        if num_channels == 0:
+            self.push ('[Kinda lonely in here... you\'re the only caller!]\r\n')
+        else:
+            self.push ('[There are %d other callers]\r\n' % num_channels)
+            # list the other callers, not the newcomer itself
+            nicks = []
+            for channel in self.server.channels.keys():
+                if channel is not self:
+                    nicks.append (channel.get_nick())
+            self.push (string.join (nicks, '\r\n  ') + '\r\n')
+            self.server.push_line (self, '[joined]')
+            
+    def handle_command (self, command):
+        command_line = string.split(command)
+        name = 'cmd_%s' % command_line[0][1:]
+        method = None
+        if hasattr (self, name):
+                # make sure it's a method...
+            method = getattr (self, name)
+        if type(method) == type(self.handle_command):
+            method (command_line[1:])
+        else:
+            # previously this reply was sent only for non-method
+            # attributes; missing commands failed silently
+            self.push ('unknown command: %s\r\n' % command_line[0])
+                
+    def cmd_quit (self, args):
+        self.server.push_line (self, '[left]')
+        self.push ('Goodbye!\r\n')
+        self.close_when_done()
+        
+        # alias for '/quit' - '/q'
+    cmd_q = cmd_quit
+    
+    def push_line (self, nick, line):
+        self.push ('%s: %s\r\n' % (nick, line))
+        
+    def handle_close (self):
+        self.close()
+        
+    def close (self):
+        del self.server.channels[self]
+        asynchat.async_chat.close (self)
+        
+    def get_nick (self):
+        if self.nick is not None:
+            return self.nick
+        else:
+            return 'Unknown'
+            
+class chat_server (asyncore.dispatcher):
+
+    SERVER_IDENT = 'Chat Server (V%s)' % VERSION
+    
+    channel_class = chat_channel
+    
+    spy = 1
+    
+    def __init__ (self, ip='', port=8518):
+        self.port = port
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.bind ((ip, port))
+        print '%s started on port %d' % (self.SERVER_IDENT, port)
+        self.listen (5)
+        self.channels = {}
+        self.count = 0
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        self.count = self.count + 1
+        print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])
+        self.channels[self.channel_class (self, conn, addr)] = 1
+        
+    def push_line (self, from_channel, line):
+        nick = from_channel.get_nick()
+        if self.spy:
+            print '%s: %s' % (nick, line)
+        for c in self.channels.keys():
+            if c is not from_channel:
+                c.push ('%s: %s\r\n' % (nick, line))
+                
+    def status (self):
+        lines = [
+                '<h2>%s</h2>'						% self.SERVER_IDENT,
+                '<br>Listening on Port: %d'			% self.port,
+                '<br><b>Total Sessions:</b> %d'		% self.count,
+                '<br><b>Current Sessions:</b> %d'	% (len(self.channels))
+                ]
+        return status_handler.lines_producer (lines)
+        
+    def writable (self):
+        return 0
+        
+if __name__ == '__main__':
+    import sys
+    
+    if len(sys.argv) > 1:
+        port = string.atoi (sys.argv[1])
+    else:
+        port = 8518
+        
+    s = chat_server ('', port)
+    asyncore.loop()
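
For context, the cmd_* naming convention above means new chat commands
can be added without touching handle_command().  A minimal sketch (the
subclass, command name, and port below are illustrative, not part of
this checkin):

    import asyncore
    import chat_server

    class my_channel (chat_server.chat_channel):
        # handle_command() dispatches '/who' to 'cmd_who'
        def cmd_who (self, args):
            for c in self.server.channels.keys():
                self.push ('%s\r\n' % c.get_nick())

    class my_server (chat_server.chat_server):
        channel_class = my_channel

    s = my_server ('', 8518)
    asyncore.loop()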


=== Zope/lib/python/ZServer/medusa/counter.py 1.8 => 1.9 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/counter.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,47 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# It is tempting to add an __int__ method to this class, but it's not
+# a good idea.  This class tries to gracefully handle integer
+# overflow, and to hide this detail from both the programmer and the
+# user.  Note that the __str__ method can be relied on for printing out
+# the value of a counter:
+#
+# >>> print 'Total Clients: %s' % self.total_clients
+#
+# If you need to do arithmetic with the value, then use the 'as_long'
+# method; the use of long arithmetic is a reminder that the counter
+# may overflow.
+
+class counter:
+    "general-purpose counter"
+    
+    def __init__ (self, initial_value=0):
+        self.value = initial_value
+        
+    def increment (self, delta=1):
+        result = self.value
+        try:
+            self.value = self.value + delta
+        except OverflowError:
+            self.value = long(self.value) + delta
+        return result
+        
+    def decrement (self, delta=1):
+        result = self.value
+        try:
+            self.value = self.value - delta
+        except OverflowError:
+            self.value = long(self.value) - delta
+        return result
+        
+    def as_long (self):
+        return long(self.value)
+        
+    def __nonzero__ (self):
+        return self.value != 0
+        
+    def __repr__ (self):
+        return '<counter value=%s at %x>' % (self.value, id(self))
+        
+    def __str__ (self):
+        # str() of a Python long includes a trailing 'L' on some
+        # versions; strip it only when present, so digits survive.
+        s = str(long(self.value))
+        if s[-1:] == 'L':
+            s = s[:-1]
+        return s
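
A short usage sketch of the counter class (names illustrative):

    from counter import counter

    hits = counter()
    hits.increment()                # returns the value *before* the increment
    hits.increment (5)
    hits.decrement()
    print 'Total Hits: %s' % hits   # rely on __str__ for display
    n = hits.as_long() * 2L         # use as_long() for arithmetic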


=== Zope/lib/python/ZServer/medusa/default_handler.py 1.8 => 1.9 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/default_handler.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,217 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1997 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID = '$Id$'
+
+# standard python modules
+import os
+import re
+import posixpath
+import stat
+import string
+import time
+
+# medusa modules
+import http_date
+import http_server
+import mime_type_table
+import status_handler
+import producers
+
+unquote = http_server.unquote
+
+# This is the 'default' handler.  It implements the base set of
+# features expected of a simple file-delivering HTTP server.  File
+# services are provided through a 'filesystem' object, the very same
+# one used by the FTP server.
+#
+# You can replace or modify this handler if you want a non-standard
+# HTTP server.  You can also derive your own handler classes from
+# it.
+#
+# Support for handling POST requests could be added in a derived
+# handler class; no such class is defined in this module.
+#
+
+from counter import counter
+
+class default_handler:
+
+    valid_commands = ['get', 'head']
+    
+    IDENT = 'Default HTTP Request Handler'
+    
+    # Pathnames that are tried when a URI resolves to a directory name
+    directory_defaults = [
+            'index.html',
+            'default.html'
+            ]
+    
+    default_file_producer = producers.file_producer
+    
+    def __init__ (self, filesystem):
+        self.filesystem = filesystem
+        # count total hits
+        self.hit_counter = counter()
+        # count file deliveries
+        self.file_counter = counter()
+        # count cache hits
+        self.cache_counter = counter()
+        
+    hit_counter = 0
+    
+    def __repr__ (self):
+        return '<%s (%s hits) at %x>' % (
+                self.IDENT,
+                self.hit_counter,
+                id (self)
+                )
+        
+        # always match, since this is a default
+    def match (self, request):
+        return 1
+        
+        # handle a file request, with caching.
+        
+    def handle_request (self, request):
+    
+        if request.command not in self.valid_commands:
+            request.error (400) # bad request
+            return
+            
+        self.hit_counter.increment()
+        
+        path, params, query, fragment = request.split_uri()
+        
+        if '%' in path:
+            path = unquote (path)
+            
+            # strip off all leading slashes
+        while path and path[0] == '/':
+            path = path[1:]
+            
+        if self.filesystem.isdir (path):
+            if path and path[-1] != '/':
+                request['Location'] = 'http://%s/%s/' % (
+                        request.channel.server.server_name,
+                        path
+                        )
+                request.error (301)
+                return
+                
+                # we could also generate a directory listing here,
+                # may want to move this into another method for that
+                # purpose
+            found = 0
+            if path and path[-1] != '/':
+                path = path + '/'
+            for default in self.directory_defaults:
+                p = path + default
+                if self.filesystem.isfile (p):
+                    path = p
+                    found = 1
+                    break
+            if not found:
+                request.error (404) # Not Found 
+                return
+                
+        elif not self.filesystem.isfile (path):
+            request.error (404) # Not Found
+            return
+            
+        file_length = self.filesystem.stat (path)[stat.ST_SIZE]
+        
+        ims = get_header_match (IF_MODIFIED_SINCE, request.header)
+        
+        length_match = 1
+        if ims:
+            length = ims.group (4)
+            if length:
+                try:
+                    length = string.atoi (length)
+                    if length != file_length:
+                        length_match = 0
+                except ValueError:
+                    pass
+                    
+        ims_date = 0
+        
+        if ims:
+            ims_date = http_date.parse_http_date (ims.group (1))
+            
+        try:
+            mtime = self.filesystem.stat (path)[stat.ST_MTIME]
+        except:
+            request.error (404)
+            return
+            
+        if length_match and ims_date:
+            if mtime <= ims_date:
+                request.reply_code = 304
+                request.done()
+                self.cache_counter.increment()
+                return
+        try:
+            file = self.filesystem.open (path, 'rb')
+        except IOError:
+            request.error (404)
+            return
+            
+        request['Last-Modified'] = http_date.build_http_date (mtime)
+        request['Content-Length'] = file_length
+        self.set_content_type (path, request)
+        
+        if request.command == 'get':
+            request.push (self.default_file_producer (file))
+            
+        self.file_counter.increment()
+        request.done()
+        
+    def set_content_type (self, path, request):
+        ext = string.lower (get_extension (path))
+        if mime_type_table.content_type_map.has_key (ext):
+            request['Content-Type'] = mime_type_table.content_type_map[ext]
+        else:
+                # TODO: test a chunk off the front of the file for 8-bit
+                # characters, and use application/octet-stream instead.
+            request['Content-Type'] = 'text/plain'
+            
+    def status (self):
+        return producers.simple_producer (
+                '<li>%s' % status_handler.html_repr (self)
+                + '<ul>'
+                + '  <li><b>Total Hits:</b> %s'			% self.hit_counter
+                + '  <li><b>Files Delivered:</b> %s'	% self.file_counter
+                + '  <li><b>Cache Hits:</b> %s'			% self.cache_counter
+                + '</ul>'
+                )
+        
+        # HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
+        # to this header.  I suppose its purpose is to avoid the overhead
+        # of parsing dates...
+IF_MODIFIED_SINCE = re.compile (
+        'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
+        re.IGNORECASE
+        )
+
+USER_AGENT = re.compile ('User-Agent: (.*)', re.IGNORECASE)
+
+CONTENT_TYPE = re.compile (
+        r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
+        re.IGNORECASE
+        )
+
+get_header = http_server.get_header
+get_header_match = http_server.get_header_match
+
+def get_extension (path):
+    dirsep = string.rfind (path, '/')
+    dotsep = string.rfind (path, '.')
+    if dotsep > dirsep:
+        return path[dotsep+1:]
+    else:
+        return ''
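
As the comments at the top of this file note, a handler only needs
'match' and 'handle_request' methods.  A hypothetical minimal handler
(not part of this checkin), written against the request API used above:

    # answers only '/ping'; install on an http_server with
    # install_handler(), just like default_handler.
    class ping_handler:

        def match (self, request):
            path, params, query, fragment = request.split_uri()
            return path == '/ping'

        def handle_request (self, request):
            request['Content-Type'] = 'text/plain'
            request['Content-Length'] = 6
            request.push ('pong\r\n')
            request.done()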


=== Zope/lib/python/ZServer/medusa/event_loop.py 1.4 => 1.5 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/event_loop.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,93 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# This is an alternative event loop that supports 'schedulable events'.
+# You can specify an event callback to take place after <n> seconds.
+
+# Important usage note: The granularity of the time-check is limited
+# by the <timeout> argument to 'go()'; if there is little or no
+# activity and you specify a 30-second timeout interval, then the
+# schedule of events may only be checked at those 30-second intervals.
+# In other words, if you need 1-second resolution, you will have to
+# poll at 1-second intervals.  This facility is more useful for longer
+# timeouts ("if the channel doesn't close in 5 minutes, then forcibly
+# close it" would be a typical usage).
+
+import asyncore
+import bisect
+import time
+
+socket_map = asyncore.socket_map
+
+class event_loop:
+
+    def __init__ (self):
+        self.events = []
+        self.num_channels = 0
+        self.max_channels = 0
+        
+    def go (self, timeout=30.0, granularity=15):
+        global socket_map
+        last_event_check = 0
+        while socket_map:
+            now = int(time.time())
+            if (now - last_event_check) >= granularity:
+                last_event_check = now
+                fired = []
+                # yuck. i want my lisp.
+                i = j = 0
+                while i < len(self.events):
+                    when, what = self.events[i]
+                    if now >= when:
+                        fired.append (what)
+                        j = i + 1
+                    else:
+                        break
+                    i = i + 1
+                if fired:
+                    self.events = self.events[j:]
+                    for what in fired:
+                        what (self, now)
+                        # sample the number of channels
+            n = len(asyncore.socket_map)
+            self.num_channels = n
+            if n > self.max_channels:
+                self.max_channels = n
+            asyncore.poll (timeout)
+            
+    def schedule (self, delta, callback):
+        now = int (time.time())
+        bisect.insort (self.events, (now + delta, callback))
+        
+    def __len__ (self):
+        return len(self.events)
+        
+class test (asyncore.dispatcher):
+
+    def __init__ (self):
+        asyncore.dispatcher.__init__ (self)
+        
+    def handle_connect (self):
+        print 'Connected!'
+        
+    def writable (self):
+        return not self.connected
+        
+    def connect_timeout_callback (self, event_loop, when):
+        if not self.connected:
+            print 'Timeout on connect'
+            self.close()
+            
+    def periodic_thing_callback (self, event_loop, when):
+        print 'A Periodic Event has Occurred!'
+        # re-schedule it.
+        event_loop.schedule (15, self.periodic_thing_callback)
+        
+if __name__ == '__main__':
+    import socket
+    el = event_loop()
+    t = test ()
+    t.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+    el.schedule (10, t.connect_timeout_callback)
+    el.schedule (15, t.periodic_thing_callback)
+    t.connect (('squirl', 80))
+    el.go(1.0)
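
A compact sketch of the scheduling API (names illustrative).  Note that
go() only runs while asyncore's socket_map is non-empty, and the
granularity argument bounds how precisely events fire:

    import event_loop

    def tick (loop, when):
        # callbacks receive the event_loop and the current timestamp
        print 'tick at %d' % when
        loop.schedule (60, tick)    # re-arm for roughly a minute later

    el = event_loop.event_loop()
    el.schedule (60, tick)
    # ... create some asyncore channels, then:
    # el.go (timeout=30.0, granularity=15)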


=== Zope/lib/python/ZServer/medusa/fifo.py 1.4 => 1.5 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/fifo.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,203 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# fifo, implemented with lisp-style pairs.
+# [quick translation of scheme48/big/queue.scm]
+
+class fifo:
+
+    def __init__ (self):
+        self.head, self.tail = None, None
+        self.length = 0
+        self.node_cache = None
+        
+    def __len__ (self):
+        return self.length
+        
+    def push (self, v):
+        self.node_cache = None
+        self.length = self.length + 1
+        p = [v, None]
+        if self.head is None:
+            self.head = p
+        else:
+            self.tail[1] = p
+        self.tail = p
+        
+    def pop (self):
+        self.node_cache = None
+        pair = self.head
+        if pair is None:
+            raise ValueError, "pop() from an empty queue"
+        else:
+            self.length = self.length - 1
+            [value, next] = pair
+            self.head = next
+            if next is None:
+                self.tail = None
+            return value
+            
+    def first (self):
+        if self.head is None:
+            raise ValueError, "first() of an empty queue"
+        else:
+            return self.head[0]
+            
+    def push_front (self, thing):
+        self.node_cache = None
+        self.length = self.length + 1
+        old_head = self.head
+        new_head = [thing, old_head]
+        self.head = new_head
+        if old_head is None:
+            self.tail = new_head
+            
+    def _nth (self, n):
+        i = n
+        h = self.head
+        while i:
+            h = h[1]
+            i = i - 1
+        self.node_cache = n, h[1]
+        return h[0]
+        
+    def __getitem__ (self, index):
+        if (index < 0) or (index >= self.length):
+            raise IndexError, "index out of range"
+        else:
+            if self.node_cache:
+                j, h = self.node_cache
+                if j == index - 1:
+                    result = h[0]
+                    self.node_cache = index, h[1]
+                    return result
+                else:
+                    return self._nth (index)
+            else:
+                return self._nth (index)
+                
+                
+class protected_fifo:
+
+    def __init__ (self, lock=None):
+        if lock is None:
+            import thread
+            self.lock = thread.allocate_lock()
+        else:
+            self.lock = lock
+        self.fifo = fifo()   # the fifo class defined above, not a module
+        
+    def push (self, item):
+        try:
+            self.lock.acquire()
+            self.fifo.push (item)
+        finally:
+            self.lock.release()
+            
+    enqueue = push
+    
+    def pop (self):
+        try:
+            self.lock.acquire()
+            return self.fifo.pop()
+        finally:
+            self.lock.release()
+            
+    dequeue = pop
+    
+    def __len__ (self):
+        try:
+            self.lock.acquire()
+            return len(self.fifo)
+        finally:
+            self.lock.release()
+            
+class output_fifo:
+
+    EMBEDDED	= 'embedded'
+    EOF			= 'eof'
+    TRIGGER		= 'trigger'
+    
+    def __init__ (self):
+            # containment, not inheritance
+        self.fifo = fifo()
+        self._embedded = None
+        
+    def push_embedded (self, fifo):
+            # push embedded fifo
+        fifo.parent = self # CYCLE
+        self.fifo.push ((self.EMBEDDED, fifo))
+        
+    def push_eof (self):
+            # push end-of-fifo
+        self.fifo.push ((self.EOF, None))
+        
+    def push_trigger (self, thunk):
+        self.fifo.push ((self.TRIGGER, thunk))
+        
+    def push (self, item):
+            # item should be a producer or string
+        self.fifo.push (item)
+        
+        # 'length' is an inaccurate term.  we should
+        # probably use an 'empty' method instead.
+    def __len__ (self):
+        if self._embedded is None:
+            return len(self.fifo)
+        else:
+            return len(self._embedded)
+            
+    def empty (self):
+        return len(self) == 0
+        
+    def first (self):
+        if self._embedded is None:
+            return self.fifo.first()
+        else:
+            return self._embedded.first()
+            
+    def pop (self):
+        if self._embedded is not None:
+            return self._embedded.pop()
+        else:
+            result = self.fifo.pop()
+            # unset self._embedded
+            self._embedded = None
+            # check for special items in the front
+            if len(self.fifo):
+                front = self.fifo.first()
+                if type(front) is type(()):
+                        # special
+                    kind, value = front
+                    if kind is self.EMBEDDED:
+                        self._embedded = value
+                    elif kind is self.EOF:
+                            # break the cycle
+                        parent = self.parent
+                        self.parent = None
+                        # pop from parent
+                        parent._embedded = None
+                    elif kind is self.TRIGGER:
+                            # call the trigger thunk
+                        value()
+                        # remove the special
+                    self.fifo.pop()
+                    # return the originally popped result
+            return result
+            
+def test_embedded():
+    of = output_fifo()
+    f2 = output_fifo()
+    f3 = output_fifo()
+    of.push ('one')
+    of.push_embedded (f2)
+    f2.push ('two')
+    f3.push ('three')
+    f3.push ('four')
+    f2.push_embedded (f3)
+    f3.push_eof()
+    f2.push ('five')
+    f2.push_eof()
+    of.push ('six')
+    of.push ('seven')
+    while 1:
+        print of.pop()
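
Basic use of the plain fifo class, for reference (session illustrative):

    from fifo import fifo

    q = fifo()
    q.push ('a')
    q.push ('b')
    q.push_front ('z')
    print len(q), q.first()         # 3 z
    print q[1]                      # 'a'; indexing walks the pair chain
    while len(q):
        print q.pop()               # z, then a, then b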


=== Zope/lib/python/ZServer/medusa/filesys.py 1.12 => 1.13 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/filesys.py	Tue Mar 18 16:15:16 2003
@@ -0,0 +1,469 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#	$Id$
+#	Author: Sam Rushing <rushing@nightmare.com>
+#
+# Generic filesystem interface.
+#
+
+# We want to provide a complete wrapper around any and all
+# filesystem operations.
+
+# this class is really just for documentation,
+# identifying the API for a filesystem object.
+
+# opening files for reading, and listing directories, should
+# return a producer.
+
+class abstract_filesystem:
+    def __init__ (self):
+        pass
+        
+    def current_directory (self):
+        "Return a string representing the current directory."
+        pass
+        
+    def listdir (self, path, long=0):
+        """Return a listing of the directory at 'path' The empty string
+        indicates the current directory.  If 'long' is set, instead
+        return a list of (name, stat_info) tuples
+        """
+        pass
+        
+    def open (self, path, mode):
+        "Return an open file object"
+        pass
+        
+    def stat (self, path):
+        "Return the equivalent of os.stat() on the given path."
+        pass
+        
+    def isdir (self, path):
+        "Does the path represent a directory?"
+        pass
+        
+    def isfile (self, path):
+        "Does the path represent a plain file?"
+        pass
+        
+    def cwd (self, path):
+        "Change the working directory."
+        pass
+        
+    def cdup (self):
+        "Change to the parent of the current directory."
+        pass
+        
+        
+    def longify (self, path):
+        """Return a 'long' representation of the filename
+        [for the output of the LIST command]"""
+        pass
+        
+        # standard wrapper around a unix-like filesystem, with a 'false root'
+        # capability.
+        
+        # security considerations: can symbolic links be used to 'escape' the
+        # root?  should we allow it?  if not, then we could scan the
+        # filesystem on startup, but that would not help if they were added
+        # later.  We will probably need to check for symlinks in the cwd method.
+        
+        # what to do if wd is an invalid directory?
+        
+import os,re
+import stat
+import string
+
+def safe_stat (path):
+    try:
+        return (path, os.stat (path))
+    except:
+        return None
+        
+import glob
+
+class os_filesystem:
+    path_module = os.path
+    
+    # set this to zero if you want to disable pathname globbing.
+    # [we currently don't glob, anyway]
+    do_globbing = 1
+    
+    def __init__ (self, root, wd='/'):
+        self.root = root
+        self.wd = wd
+        
+    def current_directory (self):
+        return self.wd
+        
+    def isfile (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        return self.path_module.isfile (self.translate(p))
+        
+    def isdir (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        return self.path_module.isdir (self.translate(p))
+        
+    def cwd (self, path):
+        p = self.normalize (self.path_module.join (self.wd, path))
+        translated_path = self.translate(p)
+        if not self.path_module.isdir (translated_path):
+            return 0
+        else:
+            old_dir = os.getcwd()
+            # temporarily change to that directory, in order
+            # to see if we have permission to do so.
+            try:
+                can = 0
+                try:
+                    os.chdir (translated_path)
+                    can = 1
+                    self.wd = p
+                except:
+                    pass
+            finally:
+                if can:
+                    os.chdir (old_dir)
+            return can
+            
+    def cdup (self):
+        return self.cwd ('..')
+        
+    def listdir (self, path, long=0):
+        p = self.translate (path)
+        # I think we should glob, but limit it to the current
+        # directory only.
+        ld = os.listdir (p)
+        if not long:
+            return list_producer (ld, 0, None)
+        else:
+            old_dir = os.getcwd()
+            try:
+                os.chdir (p)
+                # if os.stat fails we ignore that file.
+                result = filter (None, map (safe_stat, ld))
+            finally:
+                os.chdir (old_dir)
+            return list_producer (result, 1, self.longify)
+            
+            # TODO: implement a cache w/timeout for stat()
+    def stat (self, path):
+        p = self.translate (path)
+        return os.stat (p)
+        
+    def open (self, path, mode):
+        p = self.translate (path)
+        return open (p, mode)
+        
+    def unlink (self, path):
+        p = self.translate (path)
+        return os.unlink (p)
+        
+    def mkdir (self, path):
+        p = self.translate (path)
+        return os.mkdir (p)
+        
+    def rmdir (self, path):
+        p = self.translate (path)
+        return os.rmdir (p)
+        
+        # utility methods
+    def normalize (self, path):
+            # watch for the ever-sneaky '/+' path element
+        path = re.sub ('/+', '/', path)
+        p = self.path_module.normpath (path)
+        # remove 'dangling' cdup's.
+        if len(p) > 2 and p[:3] == '/..':
+            p = '/'
+        return p
+        
+    def translate (self, path):
+            # we need to join together three separate
+            # path components, and do it safely.
+            # <real_root>/<current_directory>/<path>
+            # use the operating system's path separator.
+        path = string.join (string.split (path, '/'), os.sep)
+        p = self.normalize (self.path_module.join (self.wd, path))
+        p = self.normalize (self.path_module.join (self.root, p[1:]))
+        return p
+        
+    def longify (self, (path, stat_info)):
+        return unix_longify (path, stat_info)
+        
+    def __repr__ (self):
+        return '<unix-style fs root:%s wd:%s>' % (
+                self.root,
+                self.wd
+                )
+        
+if os.name == 'posix':
+
+    class unix_filesystem (os_filesystem):
+        pass
+        
+    class schizophrenic_unix_filesystem (os_filesystem):
+        PROCESS_UID		= os.getuid()
+        PROCESS_EUID	= os.geteuid()
+        PROCESS_GID		= os.getgid()
+        PROCESS_EGID	= os.getegid()
+        
+        def __init__ (self, root, wd='/', persona=(None, None)):
+            os_filesystem.__init__ (self, root, wd)
+            self.persona = persona
+            
+        def become_persona (self):
+            if self.persona != (None, None):    # compare by value, not identity
+                uid, gid = self.persona
+                # the order of these is important!
+                os.setegid (gid)
+                os.seteuid (uid)
+                
+        def become_nobody (self):
+            if self.persona != (None, None):
+                os.seteuid (self.PROCESS_UID)
+                os.setegid (self.PROCESS_GID)
+                
+                # cwd, cdup, open, listdir
+        def cwd (self, path):
+            try:
+                self.become_persona()
+                return os_filesystem.cwd (self, path)
+            finally:
+                self.become_nobody()
+                
+        def cdup (self):
+            try:
+                self.become_persona()
+                return os_filesystem.cdup (self)
+            finally:
+                self.become_nobody()
+                
+        def open (self, filename, mode):
+            try:
+                self.become_persona()
+                return os_filesystem.open (self, filename, mode)
+            finally:
+                self.become_nobody()
+                
+        def listdir (self, path, long=0):
+            try:
+                self.become_persona()
+                return os_filesystem.listdir (self, path, long)
+            finally:
+                self.become_nobody()
+                
+                # This hasn't been very reliable across different platforms.
+                # maybe think about a separate 'directory server'.
+                #
+                #	import posixpath
+                #	import fcntl
+                #	import FCNTL
+                #	import select
+                #	import asyncore
+                #
+                #	# pipes /bin/ls for directory listings.
+                #	class unix_filesystem (os_filesystem):
+                #		pass
+                # 		path_module = posixpath
+                #
+                # 		def listdir (self, path, long=0):
+                # 			p = self.translate (path)
+                # 			if not long:
+                # 				return list_producer (os.listdir (p), 0, None)
+                # 			else:
+                # 				command = '/bin/ls -l %s' % p
+                # 				print 'opening pipe to "%s"' % command
+                # 				fd = os.popen (command, 'rt')
+                # 				return pipe_channel (fd)
+                #
+                # 	# this is both a dispatcher, _and_ a producer
+                # 	class pipe_channel (asyncore.file_dispatcher):
+                # 		buffer_size = 4096
+                #
+                # 		def __init__ (self, fd):
+                # 			asyncore.file_dispatcher.__init__ (self, fd)
+                # 			self.fd = fd
+                # 			self.done = 0
+                # 			self.data = ''
+                #
+                # 		def handle_read (self):
+                # 			if len (self.data) < self.buffer_size:
+                # 				self.data = self.data + self.fd.read (self.buffer_size)
+                # 			#print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
+                #
+                # 		def handle_expt (self):
+                # 			#print '%s.handle_expt()' % self
+                # 			self.done = 1
+                #
+                # 		def ready (self):
+                # 			#print '%s.ready() => %d' % (self, len(self.data))
+                # 			return ((len (self.data) > 0) or self.done)
+                #
+                # 		def more (self):
+                # 			if self.data:
+                # 				r = self.data
+                # 				self.data = ''
+                # 			elif self.done:
+                # 				self.close()
+                # 				self.downstream.finished()
+                # 				r = ''
+                # 			else:
+                # 				r = None
+                # 			#print '%s.more() => %s' % (self, (r and len(r)))
+                # 			return r
+                
+                # For the 'real' root, we could obtain a list of drives, and then
+                # use that.  Doesn't win32 provide such a 'real' filesystem?
+                # [yes, I think something like this "\\.\c\windows"]
+                
+class msdos_filesystem (os_filesystem):
+    def longify (self, (path, stat_info)):
+        return msdos_longify (path, stat_info)
+        
+        # A merged filesystem will let you plug other filesystems together.
+        # We really need the equivalent of a 'mount' capability - this seems
+        # to be the most general idea.  So you'd use a 'mount' method to place
+        # another filesystem somewhere in the hierarchy.
+        
+        # Note: this is most likely how I will handle ~user directories
+        # with the http server.
+        
+class merged_filesystem:
+    def __init__ (self, *fsys):
+        pass
+        
+        # this matches the output of NT's ftp server (when in
+        # MSDOS mode) exactly.
+        
+def msdos_longify (file, stat_info):
+    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+        dir = '<DIR>'
+    else:
+        dir = '     '
+    date = msdos_date (stat_info[stat.ST_MTIME])
+    return '%s       %s %8d %s' % (
+            date,
+            dir,
+            stat_info[stat.ST_SIZE],
+            file
+            )
+    
+def msdos_date (t):
+    try:
+        info = time.gmtime (t)
+    except:
+        info = time.gmtime (0)
+        # year, month, day, hour, minute, second, ...
+    # time.gmtime() returns an immutable tuple; compute the hour locally
+    hour = info[3]
+    if hour > 11:
+        merid = 'PM'
+        hour = hour - 12
+    else:
+        merid = 'AM'
+    return '%02d-%02d-%02d  %02d:%02d%s' % (
+            info[1],
+            info[2],
+            info[0]%100,
+            hour,
+            info[4],
+            merid
+            )
+    
+months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+mode_table = {
+        '0':'---',
+        '1':'--x',
+        '2':'-w-',
+        '3':'-wx',
+        '4':'r--',
+        '5':'r-x',
+        '6':'rw-',
+        '7':'rwx'
+        }
+
+import time
+
+def unix_longify (file, stat_info):
+        # for now, only pay attention to the lower bits
+    mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
+    mode = string.join (map (lambda x: mode_table[x], mode), '')
+    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+        dirchar = 'd'
+    else:
+        dirchar = '-'
+    date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
+    # str() first: on posix the uid/gid are ints, which have no .replace()
+    user = str(stat_info[stat.ST_UID]).replace(' ','_')
+    group = str(stat_info[stat.ST_GID]).replace(' ','_')
+    if user=='System_Processes': user='Sysproc'
+    if group=='System_Processes': group='Sysproc'
+
+    return '%s%s %3d %-8s %-8s %8d %s %s' % (
+            dirchar,
+            mode,
+            stat_info[stat.ST_NLINK],
+            user,
+            group,
+            stat_info[stat.ST_SIZE],
+            date,
+            file
+            )
+    
+    # Emulate the unix 'ls' command's date field.
+    # it has two formats - if the date is more than 180
+    # days in the past, then it's like this:
+    # Oct 19  1995
+    # otherwise, it looks like this:
+    # Oct 19 17:33
+    
+def ls_date (now, t):
+    try:
+        info = time.gmtime (t)
+    except:
+        info = time.gmtime (0)
+        # 15,600,000 == 86,400 * 180
+    if (now - t) > 15600000:
+        return '%s %2d  %d' % (
+                months[info[1]-1],
+                info[2],
+                info[0]
+                )
+    else:
+        return '%s %2d %02d:%02d' % (
+                months[info[1]-1],
+                info[2],
+                info[3],
+                info[4]
+                )
+        
+        # ===========================================================================
+        # Producers
+        # ===========================================================================
+        
+class list_producer:
+    def __init__ (self, file_list, long, longify):
+        self.file_list = file_list
+        self.long = long
+        self.longify = longify
+        self.done = 0
+        
+    def ready (self):
+        if len(self.file_list):
+            return 1
+        else:
+            if not self.done:
+                self.done = 1
+            return 0
+        
+        # this should do a pushd/popd
+    def more (self):
+        if not self.file_list:
+            return ''
+        else:
+                # do a few at a time
+            bunch = self.file_list[:50]
+            if self.long:
+                bunch = map (self.longify, bunch)
+            self.file_list = self.file_list[50:]
+            return string.joinfields (bunch, '\r\n') + '\r\n'
+            
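
A sketch of driving a directory listing through the producer interface
(the '/tmp' root is arbitrary); more() returns '' once exhausted:

    import filesys

    fs = filesys.os_filesystem ('/tmp')
    p = fs.listdir ('/', long=1)    # (name, stat_info) formatted like unix 'ls'
    while 1:
        data = p.more()
        if not data:
            break
        print data,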


=== Zope/lib/python/ZServer/medusa/ftp_server.py 1.22 => 1.23 === (1038/1138 lines abridged)
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/ftp_server.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,1135 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1996-2000 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID =  '$Id$'
+
+# An extensible, configurable, asynchronous FTP server.
+# 
+# All socket I/O is non-blocking, however file I/O is currently
+# blocking.  Eventually file I/O may be made non-blocking, too, if it
+# seems necessary.  Currently the only CPU-intensive operation is
+# getting and formatting a directory listing.  [this could be moved
+# into another process/directory server, or another thread?]
+#
+# Only a subset of RFC 959 is implemented, but much of that RFC is
+# vestigial anyway.  I've attempted to include the most commonly-used
+# commands, using the feature set of wu-ftpd as a guide.
+
+import asyncore
+import asynchat
+
+import os
+import socket
+import stat
+import string
+import sys
+import time
+
+# TODO: implement a directory listing cache.  On very-high-load
+# servers this could save a lot of disk abuse, and possibly the
+# work of computing emulated unix ls output.
+
+# Potential security problem with the FTP protocol?  I don't think
+# there's any verification of the origin of a data connection.  Not
+# really a problem for the server (since it doesn't send the port
+# command, except when in PASV mode) But I think a data connection
+# could be spoofed by a program with access to a sniffer - it could
+# watch for a PORT command to go over a command channel, and then
+# connect to that port before the server does.
+
+# Unix user id's:
+# In order to support assuming the id of a particular user,
+# it seems there are two options:
+# 1) fork, and seteuid in the child

[-=- -=- -=- 1038 lines omitted -=- -=- -=-]

+        # '!' requires write access
+        #
+command_documentation = {
+        'abor':	'abort previous command',							#*
+        'acct':	'specify account (ignored)',
+        'allo':	'allocate storage (vacuously)',
+        'appe':	'append to a file',									#*!
+        'cdup':	'change to parent of current working directory',	#*
+        'cwd':	'change working directory',							#*
+        'dele':	'delete a file',									#!
+        'help':	'give help information',							#*
+        'list':	'give list of files in a directory',				#*
+        'mkd':	'make a directory',									#!
+        'mdtm':	'show last modification time of file',				#*
+        'mode':	'specify data transfer mode',
+        'nlst':	'give name list of files in directory',				#*
+        'noop':	'do nothing',										#*
+        'pass':	'specify password',									#*
+        'pasv':	'prepare for server-to-server transfer',			#*
+        'port':	'specify data connection port',						#*
+        'pwd':	'print the current working directory',				#*
+        'quit':	'terminate session',								#*
+        'rest':	'restart incomplete transfer',						#*
+        'retr':	'retrieve a file',									#*
+        'rmd':	'remove a directory',								#!
+        'rnfr':	'specify rename-from file name',					#!
+        'rnto':	'specify rename-to file name',						#!
+        'site':	'non-standard commands (see next section)',
+        'size':	'return size of file',								#*
+        'stat':	'return status of server',							#*
+        'stor':	'store a file',										#*!
+        'stou':	'store a file with a unique name',					#!
+        'stru':	'specify data transfer structure',
+        'syst':	'show operating system type of server system',		#*
+        'type':	'specify data transfer type',						#*
+        'user':	'specify user name',								#*
+        'xcup':	'change to parent of current working directory (deprecated)',
+        'xcwd':	'change working directory (deprecated)',
+        'xmkd':	'make a directory (deprecated)',					#!
+        'xpwd':	'print the current working directory (deprecated)',
+        'xrmd':	'remove a directory (deprecated)',					#!
+}
+
+
+# debugging aid (linux)
+def get_vm_size ():
+    return string.atoi (string.split(open ('/proc/self/stat').readline())[22])
+    
+def print_vm():
+    print 'vm: %8dk' % (get_vm_size()/1024)
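
Although the body of this file is abridged above, the http_server test
code later in this checkin shows the expected startup wiring for the
FTP server; standing alone it reduces to roughly this (the root
directory is illustrative):

    import asyncore
    import ftp_server

    ftp = ftp_server.ftp_server (
            ftp_server.dummy_authorizer ('/home/ftp'),
            port=8021
            )
    asyncore.loop()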


=== Zope/lib/python/ZServer/medusa/http_bobo.py 1.5 => 1.6 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/http_bobo.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,75 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+import string
+import regex
+import StringIO
+import status_handler
+
+RCS_ID = '$Id$'
+# guard against an unexpanded RCS keyword, as http_server.py does
+if RCS_ID.startswith('$Id: '):
+    VERSION_STRING = string.split(RCS_ID)[2]
+else:
+    VERSION_STRING = '0.0'
+
+class bobo_extension:
+    hits = 0
+    
+    SERVER_IDENT = 'Bobo Extension (V%s)' % VERSION_STRING
+    
+    def __init__ (self, regexp):
+        self.regexp = regex.compile (regexp)
+        
+    def __repr__ (self):
+        return '<Bobo Extension <b>(%d hits)</b> at %x>' % (
+                self.hits,
+                id (self)
+                )
+        
+    def match (self, path_part):
+        if self.regexp.match (path_part) == len(path_part):
+            return 1
+        else:
+            return 0
+            
+    def status (self):
+        return status_handler.lines_producer ([
+                '<h2>%s</h2>'  						%self.SERVER_IDENT,
+                '<br><b>Total Hits:</b> %d'			% self.hits,
+                ])
+        
+    def handle_request (self, channel):
+        self.hits = self.hits + 1
+        
+        [path, params, query, fragment] = channel.uri
+        
+        if query:
+                # cgi_publisher_module doesn't want the leading '?'
+            query = query[1:]
+            
+        env = {}
+        env['REQUEST_METHOD']	= method
+        env['SERVER_PORT']		= channel.server.port
+        env['SERVER_NAME']		= channel.server.server_name
+        env['SCRIPT_NAME']		= module_name
+        env['QUERY_STRING']		= query
+        env['PATH_INFO']		= string.join (path_parts[1:],'/')
+        
+        # this should really be done with with a real producer.  just
+        # have to make sure it can handle all of the file object api.
+        
+        sin  = StringIO.StringIO('')
+        sout = StringIO.StringIO()
+        serr = StringIO.StringIO()
+        
+        cgi_module_publisher.publish_module (
+                module_name,
+                stdin=sin,
+                stdout=sout,
+                stderr=serr,
+                environ=env,
+                debug=1
+                )
+        
+        channel.push (
+                channel.response (200) + \
+                channel.generated_content_header (path)
+                )
+        
+        self.push (sout.getvalue())
+        self.push (serr.getvalue())
+        self.close_when_done()


=== Zope/lib/python/ZServer/medusa/http_date.py 1.10 => 1.11 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/http_date.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,134 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+import re
+import string
+import time
+
+def concat (*args):
+    return ''.join (args)	
+    
+def join (seq, field=' '):
+    return field.join (seq)
+    
+def group (s):
+    return '(' + s + ')'
+    
+short_days = ['sun','mon','tue','wed','thu','fri','sat']
+long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']
+
+short_day_reg = group (join (short_days, '|'))
+long_day_reg = group (join (long_days, '|'))
+
+daymap = {}
+for i in range(7):
+    daymap[short_days[i]] = i
+    daymap[long_days[i]] = i
+    
+hms_reg = join (3 * [group('[0-9][0-9]')], ':')
+
+months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
+
+monmap = {}
+for i in range(12):
+    monmap[months[i]] = i+1
+    
+months_reg = group (join (months, '|'))
+
+# From draft-ietf-http-v11-spec-07.txt/3.3.1
+#       Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
+#       Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+#       Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format
+
+# rfc822 format
+rfc822_date = join (
+        [concat (short_day_reg,','),	# day
+         group('[0-9][0-9]?'),			# date
+         months_reg,					# month
+         group('[0-9]+'),				# year
+         hms_reg,						# hour minute second
+         'gmt'
+         ],
+        ' '
+        )
+
+rfc822_reg = re.compile (rfc822_date)
+
+def unpack_rfc822 (m):
+    g = m.group
+    a = string.atoi
+    return (
+            a(g(4)),	   	# year
+            monmap[g(3)],	# month
+            a(g(2)),		# day
+            a(g(5)),		# hour
+            a(g(6)),		# minute
+            a(g(7)),		# second
+            0,
+            0,
+            0
+            )
+    
+    # rfc850 format
+rfc850_date = join (
+        [concat (long_day_reg,','),
+         join (
+                 [group ('[0-9][0-9]?'),
+                  months_reg,
+                  group ('[0-9]+')
+                  ],
+                 '-'
+                 ),
+         hms_reg,
+         'gmt'
+         ],
+        ' '
+        )
+
+rfc850_reg = re.compile (rfc850_date)
+# they actually unpack the same way
+def unpack_rfc850 (m):
+    g = m.group
+    a = string.atoi
+    return (
+            a(g(4)),	   	# year
+            monmap[g(3)],	# month
+            a(g(2)),		# day
+            a(g(5)),		# hour
+            a(g(6)),		# minute
+            a(g(7)),		# second
+            0,
+            0,
+            0
+            )
+    
+    # parsedate.parsedate	- ~700/sec.
+    # parse_http_date    	- ~1333/sec.
+    
+weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def build_http_date (when):
+    year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when)
+    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+            weekdayname[wd],
+            day, monthname[month], year,
+            hh, mm, ss)
+
+def parse_http_date (d):
+    d = string.lower (d)
+    tz = time.timezone
+    m = rfc850_reg.match (d)
+    if m and m.end() == len(d):
+        retval = int (time.mktime (unpack_rfc850(m)) - tz)
+    else:
+        m = rfc822_reg.match (d)
+        if m and m.end() == len(d):
+            retval = int (time.mktime (unpack_rfc822(m)) - tz)
+        else:
+            return 0
+            # Thanks to Craig Silverstein <csilvers@google.com> for pointing
+            # out the DST discrepancy
+    if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
+        retval = retval + (tz - time.altzone)
+    return retval
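
A round-trip sketch of the two entry points defined here
(parse_http_date returns 0 for anything it cannot parse):

    import time
    import http_date

    now = int (time.time())
    stamp = http_date.build_http_date (now)
    print stamp                                     # e.g. 'Tue, 18 Mar 2003 21:15:49 GMT'
    print http_date.parse_http_date (stamp) == now  # 1, modulo DST corrections
    print http_date.parse_http_date ('not a date')  # 0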


=== Zope/lib/python/ZServer/medusa/http_server.py 1.34 => 1.35 === (729/829 lines abridged)
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/http_server.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,826 @@
+#! /usr/local/bin/python
+# -*- Mode: Python; tab-width: 4 -*-
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1996-2000 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID =  '$Id$'
+
+# python modules
+import os
+import re
+import socket
+import stat
+import string
+import sys
+import time
+import base64
+
+# async modules
+import asyncore
+import asynchat
+
+# medusa modules
+import http_date
+import producers
+import status_handler
+import logger
+
+if RCS_ID.startswith('$Id: '):
+    VERSION_STRING = string.split(RCS_ID)[2]
+else:
+    VERSION_STRING = '0.0'
+
+from counter import counter
+from urllib import unquote
+
+# ===========================================================================
+#							Request Object
+# ===========================================================================
+
+class http_request:
+
+        # default reply code
+    reply_code = 200
+    

[-=- -=- -=- 729 lines omitted -=- -=- -=-]

+        tz = -tz
+    h, rem = divmod (tz, 3600)
+    m, rem = divmod (rem, 60)
+    if neg:
+        return '-%02d%02d' % (h, m)
+    else:
+        return '+%02d%02d' % (h, m)
+        
+        # if you run this program over a TZ change boundary, this will be invalid.
+tz_for_log = compute_timezone_for_log()
+
+if __name__ == '__main__':
+    import sys
+    if len(sys.argv) < 2:
+        print 'usage: %s <root> <port>' % (sys.argv[0])
+    else:
+        import monitor
+        import filesys
+        import default_handler
+        import status_handler
+        import ftp_server
+        import chat_server
+        import resolver
+        import logger
+        rs = resolver.caching_resolver ('127.0.0.1')
+        lg = logger.file_logger (sys.stdout)
+        ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
+        fs = filesys.os_filesystem (sys.argv[1])
+        dh = default_handler.default_handler (fs)
+        hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
+        hs.install_handler (dh)
+        ftp = ftp_server.ftp_server (
+                ftp_server.dummy_authorizer(sys.argv[1]),
+                port=8021,
+                resolver=rs,
+                logger_object=lg
+                )
+        cs = chat_server.chat_server ('', 7777)
+        sh = status_handler.status_extension([hs,ms,ftp,cs,rs])
+        hs.install_handler (sh)
+        if ('-p' in sys.argv):
+            def profile_loop ():
+                try:
+                    asyncore.loop()
+                except KeyboardInterrupt:
+                    pass
+            import profile
+            profile.run ('profile_loop()', 'profile.out')
+        else:
+            asyncore.loop()


=== Zope/lib/python/ZServer/medusa/logger.py 1.17 => 1.18 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/logger.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,275 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+import asynchat
+import socket
+import string
+import time         # these three are for the rotating logger
+import os           # |
+import stat         # v
+
+#
+# three types of log:
+# 1) file
+#    with optional flushing.  Also, one that rotates the log.
+# 2) socket
+#    dump output directly to a socket connection. [how do we
+#    keep it open?]
+# 3) syslog
+#    log to syslog via tcp.  this is a per-line protocol.
+#
+
+#
+# The 'standard' interface to a logging object is simply
+# log_object.log (message)
+#
+
+# a file-like object that captures output, and
+# makes sure to flush it always...  this could
+# be connected to:
+#  o	stdio file
+#  o	low-level file
+#  o	socket channel
+#  o	syslog output...
+
+class file_logger:
+
+        # pass this either a path or a file object.
+    def __init__ (self, file, flush=1, mode='a'):
+
+        self.filename = None
+
+        if type(file) == type(''):
+            if (file == '-'):
+                import sys
+                self.file = sys.stdout
+            else:
+                self.filename = file
+                self.file = open (file, mode)
+        else:
+            self.file = file
+        self.do_flush = flush
+        
+    def reopen(self):
+        if self.filename:
+            self.file.close()
+            self.file = open(self.filename,'a')            
+
+    def __repr__ (self):
+        return '<file logger: %s>' % self.file
+        
+    def write (self, data):
+        self.file.write (data)
+        self.maybe_flush()
+        
+    def writeline (self, line):
+        # plain file objects have no writeline() method; use write()
+        self.file.write (line)
+        self.maybe_flush()
+        
+    def writelines (self, lines):
+        self.file.writelines (lines)
+        self.maybe_flush()
+        
+    def maybe_flush (self):
+        if self.do_flush:
+            self.file.flush()
+            
+    def flush (self):
+        self.file.flush()
+        
+    def softspace (self, *args):
+        pass
+        
+    def log (self, message):
+        if message[-1] not in ('\r', '\n'):
+            self.write (message + '\n')
+        else:
+            self.write (message)
+            
+            # like a file_logger, but it must be attached to a filename.
+            # When the log gets too full, or a certain time has passed,
+            # it backs up the log and starts a new one.  Note that backing
+            # up the log is done via "mv" because anything else (cp, gzip)
+            # would take time, during which medusa would do nothing else.
+            
+class rotating_file_logger (file_logger):
+
+        # If freq is non-None we back up "daily", "weekly", or "monthly".
+        # Else if maxsize is non-None we back up whenever the log gets
+        # too big.  If both are None we never back up.
+    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
+        self.filename = file
+        self.mode = mode
+        self.file = open (file, mode)
+        self.freq = freq
+        self.maxsize = maxsize
+        self.rotate_when = self.next_backup(self.freq)
+        self.do_flush = flush
+        
+    def __repr__ (self):
+        return '<rotating-file logger: %s>' % self.file
+        
+        # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
+    def next_backup (self, freq):
+        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+        if freq == 'daily':
+            return time.mktime((yr,mo,day+1, 0,0,0, 0,0,-1))
+        elif freq == 'weekly':
+            return time.mktime((yr,mo,day-wd+7, 0,0,0, 0,0,-1))  # wd(monday)==0
+        elif freq == 'monthly':
+            return time.mktime((yr,mo+1,1, 0,0,0, 0,0,-1))
+        else:
+            return None                  # not a date-based backup
+            
+    def maybe_flush (self):              # rotate first if necessary
+        self.maybe_rotate()
+        if self.do_flush:                # from file_logger()
+            self.file.flush()
+            
+    def maybe_rotate (self):
+        if self.freq and time.time() > self.rotate_when:
+            self.rotate()
+            self.rotate_when = self.next_backup(self.freq)
+        elif self.maxsize:               # rotate when we get too big
+            try:
+                if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
+                    self.rotate()
+            except os.error:             # file not found, probably
+                self.rotate()            # will create a new file
+                
+    def rotate (self):
+        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+        try:
+            self.file.close()
+            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
+            try:
+                open(newname, "r").close()      # check if file exists
+                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
+            except:                             # no such file: the date-stamped name is unique
+                pass
+            os.rename(self.filename, newname)
+            self.file = open(self.filename, self.mode)
+        except:
+            pass
+            
+            # syslog is a line-oriented log protocol - this class would be
+            # appropriate for FTP or HTTP logs, but not for dumping stderr to.
+            
+            # TODO: a simple safety wrapper that will ensure that the line sent
+            # to syslog is reasonable.
+            
+            # TODO: async version of syslog_client: now, log entries use blocking
+            # send()
+            
+import m_syslog
+
+class syslog_logger (m_syslog.syslog_client):
+
+    svc_name = 'medusa'
+    pid_str  = str(os.getpid())
+
+    def __init__ (self, address, facility='user'):
+        m_syslog.syslog_client.__init__ (self, address)
+        self.facility = m_syslog.facility_names[facility]
+        self.address=address
+        
+    def __repr__ (self):
+        return '<syslog logger address=%s>' % (repr(self.address))
+        
+    def log (self, message):
+        m_syslog.syslog_client.log (
+            self,
+            '%s[%s]: %s' % (self.svc_name, self.pid_str, message),
+            facility=self.facility,
+            priority=m_syslog.LOG_INFO
+            )
+        
+        # log to a stream socket, asynchronously
+        
+class socket_logger (asynchat.async_chat):
+
+    def __init__ (self, address):
+        # set up the async_chat machinery (buffers, producer fifo) first
+        asynchat.async_chat.__init__ (self)
+
+        if type(address) == type(''):
+            self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
+        else:
+            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+            
+        self.connect (address)
+        self.address = address
+        
+    def __repr__ (self):
+        return '<socket logger: address=%s>' % (self.address)
+        
+    def log (self, message):
+        # push() is inherited from async_chat; the raw socket has no push()
+        if message[-2:] != '\r\n':
+            self.push (message + '\r\n')
+        else:
+            self.push (message)
+            
+            # log to multiple places
+class multi_logger:
+    def __init__ (self, loggers):
+        self.loggers = loggers
+        
+    def __repr__ (self):
+        return '<multi logger: %s>' % (repr(self.loggers))
+        
+    def log (self, message):
+        for logger in self.loggers:
+            logger.log (message)
+            
+class resolving_logger:
+    """Feed (ip, message) combinations into this logger to get a
+    resolved hostname in front of the message.  The message will not
+    be logged until the PTR request finishes (or fails)."""
+    
+    def __init__ (self, resolver, logger):
+        self.resolver = resolver
+        self.logger = logger
+        
+    class logger_thunk:
+        def __init__ (self, message, logger):
+            self.message = message
+            self.logger = logger
+            
+        def __call__ (self, host, ttl, answer):
+            if not answer:
+                answer = host
+            self.logger.log ('%s%s' % (answer, self.message))
+            
+    def log (self, ip, message):
+        self.resolver.resolve_ptr (
+                ip,
+                self.logger_thunk (
+                        message,
+                        self.logger
+                        )
+                )
+        
+class unresolving_logger:
+    "Just in case you don't want to resolve"
+    def __init__ (self, logger):
+        self.logger = logger
+        
+    def log (self, ip, message):
+        self.logger.log ('%s %s' % (ip, message))
+        
+        
+def strip_eol (line):
+    while line and line[-1] in '\r\n':
+        line = line[:-1]
+    return line
+    
+class tail_logger:
+    "Keep track of the last <size> log messages"
+    def __init__ (self, logger, size=500):
+        self.size = size
+        self.logger = logger
+        self.messages = []
+        
+    def log (self, message):
+        self.messages.append (strip_eol (message))
+        if len (self.messages) > self.size:
+            del self.messages[0]
+        self.logger.log (message)

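Every class above exposes the same single-method interface, log(message) (or log(ip, message) for the resolving variants), so they nest freely.  A hypothetical wiring, using only names defined in this module (the filename, size, and sample line are made up):

    import logger

    # rotate the access log daily, or sooner if it grows past ~1MB
    base = logger.rotating_file_logger ('access.log', freq='daily',
                                        maxsize=1024*1024)
    # keep the last 100 lines in memory for the status page
    tail = logger.tail_logger (base, 100)
    # mirror everything to stdout as well
    both = logger.multi_logger ([tail, logger.file_logger ('-')])
    # accept (ip, message) pairs without doing DNS lookups
    log  = logger.unresolving_logger (both)
    log.log ('127.0.0.1', 'GET /index.html 200')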

=== Zope/lib/python/ZServer/medusa/m_syslog.py 1.13 => 1.14 ===
--- /dev/null	Tue Mar 18 16:15:48 2003
+++ Zope/lib/python/ZServer/medusa/m_syslog.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,181 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# ======================================================================
+# Copyright 1997 by Sam Rushing
+# 
+#                         All Rights Reserved
+# 
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+# 
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""socket interface to unix syslog.
+On Unix, there are usually two ways of getting to syslog: via a
+local unix-domain socket, or via the TCP service.
+
+Usually "/dev/log" is the unix domain socket.  This may be different
+for other systems.
+
+>>> my_client = syslog_client ('/dev/log')
+
+Otherwise, just use the UDP version, port 514.
+
+>>> my_client = syslog_client (('my_log_host', 514))
+
+On win32, you will have to use the UDP version.  Note that
+you can use this to log to other hosts (and indeed, multiple
+hosts).
+
+This module is not a drop-in replacement for the python
+<syslog> extension module - the interface is different.
+
+Usage:
+
+>>> c = syslog_client()
+>>> c = syslog_client ('/strange/non_standard_log_location')
+>>> c = syslog_client (('other_host.com', 514))
+>>> c.log ('testing', facility='local0', priority='debug')
+
+"""
+
+# TODO: support named-pipe syslog.
+# [see ftp://sunsite.unc.edu/pub/Linux/system/Daemons/syslog-fifo.tar.z]
+
+# from <linux/sys/syslog.h>:
+# ===========================================================================
+# priorities/facilities are encoded into a single 32-bit quantity, where the
+# bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
+# (0-big number).  Both the priorities and the facilities map roughly
+# one-to-one to strings in the syslogd(8) source code.  This mapping is
+# included in this file.
+#
+# priorities (these are ordered)
+
+LOG_EMERG		= 0		#  system is unusable 
+LOG_ALERT		= 1		#  action must be taken immediately 
+LOG_CRIT		= 2		#  critical conditions 
+LOG_ERR			= 3		#  error conditions 
+LOG_WARNING		= 4		#  warning conditions 
+LOG_NOTICE		= 5		#  normal but significant condition 
+LOG_INFO		= 6		#  informational 
+LOG_DEBUG		= 7		#  debug-level messages 
+
+#  facility codes 
+LOG_KERN		= 0		#  kernel messages 
+LOG_USER		= 1		#  random user-level messages 
+LOG_MAIL		= 2		#  mail system 
+LOG_DAEMON		= 3		#  system daemons 
+LOG_AUTH		= 4		#  security/authorization messages 
+LOG_SYSLOG		= 5		#  messages generated internally by syslogd 
+LOG_LPR			= 6		#  line printer subsystem 
+LOG_NEWS		= 7		#  network news subsystem 
+LOG_UUCP		= 8		#  UUCP subsystem 
+LOG_CRON		= 9		#  clock daemon 
+LOG_AUTHPRIV	= 10	#  security/authorization messages (private) 
+
+#  other codes through 15 reserved for system use 
+LOG_LOCAL0		= 16		#  reserved for local use 
+LOG_LOCAL1		= 17		#  reserved for local use 
+LOG_LOCAL2		= 18		#  reserved for local use 
+LOG_LOCAL3		= 19		#  reserved for local use 
+LOG_LOCAL4		= 20		#  reserved for local use 
+LOG_LOCAL5		= 21		#  reserved for local use 
+LOG_LOCAL6		= 22		#  reserved for local use 
+LOG_LOCAL7		= 23		#  reserved for local use 
+
+priority_names = {
+        "alert":	LOG_ALERT,
+        "crit":		LOG_CRIT,
+        "debug":	LOG_DEBUG,
+        "emerg":	LOG_EMERG,
+        "err":		LOG_ERR,
+        "error":	LOG_ERR,		#  DEPRECATED 
+        "info":		LOG_INFO,
+        "notice":	LOG_NOTICE,
+        "panic": 	LOG_EMERG,		#  DEPRECATED 
+        "warn":		LOG_WARNING,		#  DEPRECATED 
+        "warning":	LOG_WARNING,
+        }
+
+facility_names = {
+        "auth":		LOG_AUTH,
+        "authpriv":	LOG_AUTHPRIV,
+        "cron": 	LOG_CRON,
+        "daemon":	LOG_DAEMON,
+        "kern":		LOG_KERN,
+        "lpr":		LOG_LPR,
+        "mail":		LOG_MAIL,
+        "news":		LOG_NEWS,
+        "security":	LOG_AUTH,		#  DEPRECATED 
+        "syslog":	LOG_SYSLOG,
+        "user":		LOG_USER,
+        "uucp":		LOG_UUCP,
+        "local0":	LOG_LOCAL0,
+        "local1":	LOG_LOCAL1,
+        "local2":	LOG_LOCAL2,
+        "local3":	LOG_LOCAL3,
+        "local4":	LOG_LOCAL4,
+        "local5":	LOG_LOCAL5,
+        "local6":	LOG_LOCAL6,
+        "local7":	LOG_LOCAL7,
+        }
+
+import socket
+
+class syslog_client:
+
+    def __init__ (self, address='/dev/log'):
+        self.address = address
+        if type (address) == type(''):
+            try: # APUE 13.4.2 specifies /dev/log as a datagram socket
+                self.socket = socket.socket( socket.AF_UNIX
+                                                       , socket.SOCK_DGRAM)
+                self.socket.connect (address)
+            except: # older linux may create as stream socket
+                self.socket = socket.socket( socket.AF_UNIX
+                                                       , socket.SOCK_STREAM)
+                self.socket.connect (address)
+            self.unix = 1
+        else:
+            self.socket = socket.socket( socket.AF_INET
+                                                   , socket.SOCK_DGRAM)
+            self.unix = 0
+
+            
+    log_format_string = '<%d>%s\000'
+    
+    def log (self, message, facility=LOG_USER, priority=LOG_INFO):
+        message = self.log_format_string % (
+                self.encode_priority (facility, priority),
+                message
+                )
+        if self.unix:
+            self.socket.send (message)
+        else:
+            self.socket.sendto (message, self.address)
+            
+    def encode_priority (self, facility, priority):
+        if type(facility) == type(''):
+            facility = facility_names[facility]
+        if type(priority) == type(''):
+            priority = priority_names[priority]			
+        return (facility<<3) | priority
+        
+    def close (self):
+        if self.unix:
+            self.socket.close()
+            

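The priority byte in each datagram is the facility number shifted left three bits, OR'd with the priority, exactly as encode_priority() computes it.  A worked example (the host name is made up): facility 'local0' is 16 and priority 'debug' is 7, so (16<<3)|7 == 135 and the wire format is '<135>message\000':

>>> c = syslog_client (('loghost.example.com', 514))
>>> c.encode_priority ('local0', 'debug')
135
>>> c.log ('spam', facility='local0', priority='debug')   # sends '<135>spam\000'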

=== Zope/lib/python/ZServer/medusa/medusa.html 1.3 => 1.4 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/medusa.html	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,290 @@
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
+<html>
+<head>
+<title>Medusa: A High-Performance Internet Server Architecture</title>
+</head>
+<body>
+<h1> <b>Medusa</b>: A High-Performance Internet Server Architecture </h1>
+
+<h2> What is Medusa? </h2>
+
+Medusa is an architecture for high-performance, robust, long-running
+TCP/IP servers (like HTTP, FTP, and NNTP).  Medusa differs from most
+other server architectures in that it runs as a single process,
+multiplexing I/O with its various client and server connections within
+a single process/thread.
+
+<p>
+
+Medusa  is written in  <a  href="http://www.python.org/">Python</a>, a
+high-level  object-oriented language that  is particularly well suited
+to building powerful, extensible servers.   Medusa can be extended and
+modified at  run-time, even  by the end-user.    User 'scripts' can be
+used to completely change the behavior of the server,  and even add in
+completely new server types.
+
+<h2> How Does it Work? </h2>
+
+Most Internet servers  are built on a 'forking'  model.  ('Fork' is  a
+Unix term  for starting a new  process.)  Such servers actually invoke
+an  entire  new  process  for every  single  client connection.   This
+approach is  simple  to implement,  but does  not  scale  very well to
+high-load situations.  Lots of clients  mean a lot of processes, which
+gobble up    large  quantities of virtual    memory   and other system
+resources.  A  high-load server thus needs  to  have a lot  of memory.
+Many  popular Internet servers  are running with hundreds of megabytes
+of memory.
+
+<p>
+<h3>The I/O bottleneck. </h3>
+<p>
+      
+The vast  majority of  Internet servers  are I/O bound   - for any one
+process,  the CPU is sitting idle  99.9%  of the time, usually waiting
+for input from an external device (in  the case of an Internet server,
+it  is waiting   for  input   from  the  network).   This  problem  is
+exacerbated by the imbalance between server and client bandwidth: most
+clients are connecting at relatively low bandwidths (28.8 kbits/sec or
+less, with network delays and inefficiencies it can be far lower).  To
+a typical server  CPU, the time between  bytes for such a client seems
+like an  eternity!  (Consider that a 200  MHz CPU can  perform roughly
+50,000 operations for each byte received from such a client).
+      
+<p>
+
+A simple metaphor for a 'forking' server is that of a supermarket
+cashier: for every 'customer' being processed [at a cash register],
+another 'person' must be created to handle each client session.  But
+what if your checkout clerks were so fast they could each individually
+handle hundreds of customers per second?  Since these clerks are
+almost always waiting for a customer to come through their line, you
+have a very large staff, sitting around idle 99.9% of the time!  Why
+not replace this staff with a single <i> super-clerk </i>, flitting
+from aisle to aisle?
+
+<p>
+
+This is exactly how Medusa works!  It multiplexes all its I/O through
+a single select() loop - this loop can handle hundreds, even thousands
+of simultaneous connections - the actual number is limited only by your
+operating system.  For a more technical overview, see
+<a href="http://www.nightmare.com/medusa/async_sockets.html">
+Asynchronous Socket Programming</a>.
+
+<h2> Why is it Better? </h2>
+
+<h3> Performance </h3>
+<p>
+
+The most obvious advantage to a single long-running server process is
+a dramatic improvement in performance.  There are several types of
+overhead involved in the forking model:
+<ul>
+  <li> <b> Process creation/destruction. </b>
+  <p>
+
+  Starting up a new process is an expensive operation on any operating
+  system.  Virtual memory must be allocated, libraries must be
+  initialized, and the operating system now has yet another task to
+  keep track of.  This start-up cost is so high that it is actually
+  <i>noticeable</i> to people!  For example, the first time you pull
+  up a web page with 15 inline images, while you are waiting for the
+  page to load you may have created and destroyed at least 16
+  processes on the web server.
+
+  <p>
+  <li> <b> Virtual Memory </b>
+  <p>
+
+  Each process also requires a certain  amount of virtual memory space
+  to be  allocated on its  behalf.  Even though most operating systems
+  implement a 'copy-on-write'    strategy that makes this  much   less
+  costly than it could be,  the end result is still  very wasteful.  A
+  100-user FTP server can  still easily require hundreds  of megabytes
+  of real  memory in order  to avoid thrashing (excess paging activity
+  due to lack of real memory).
+
+</ul>
+
+  <b>Medusa</b> eliminates  both  types  of  overhead.  Running  as  a
+  single   process,   there   is   no per-client  creation/destruction
+  overhead.  This means each client request  is answered very quickly.
+  And virtual memory  requirements  are lowered dramatically.   Memory
+  requirements can even be controlled with  more precision in order to
+  gain  the  highest performance  possible   for a particular  machine
+  configuration.
+
+<h3> Persistence </h3>
+<p>
+
+Another major advantage to the single-process model is
+<i>persistence</i>.  Often it is necessary to maintain some sort of
+state information that is available to each and every client, i.e., a
+database connection or file pointer.  Forking-model servers that need
+such shared state must arrange some method of getting it - usually via
+an IPC (inter-process communication) mechanism such as sockets or
+named pipes.  IPC itself adds yet another significant and needless
+overhead - single-process servers can simply share such information
+within a single address space.
+
+<p>
+
+Implementing persistence in Medusa is easy - the address space of its
+process (and thus its open database handles, variables, etc...) is
+available to each and every client.
+
+<h3> Not a Strawman </h3>
+
+All right, at this point many of my readers will say I'm beating up on
+a strawman.  In fact, they will say, such server architectures are
+already available - like Microsoft's Internet Information Server.
+IIS avoids the above-named problems by using <i>threads</i>.  Threads
+are 'lightweight processes' - they represent multiple concurrent
+execution paths within a single address space.  Threads solve many of
+the problems mentioned above, but also create new ones:
+
+  <ul>
+    <li>'Threaded' programs are very difficult to write - especially
+        with servers that want to utilize the 'persistence' feature -
+        great care must be taken when accessing or modifying shared resources.
+    <li>There is still additional system overhead when using threads.
+    <li>Not all operating systems support threads, and even on those
+        that do, it is difficult to use them in a portable fashion.
+  </ul>
+
+  <p>   Threads  are  <i>required</i>  in  only a    limited number of
+  situations.  In many    cases where  threads  seem  appropriate,  an
+  asynchronous  solution can actually  be  written with less work, and
+  will perform better.  Avoiding the use of  threads also makes access
+  to  shared resources (like  database  connections) easier to manage,
+  since multi-user locking is not necessary.
+
+  <p> <b>Note:</b> In the rare case where threads are actually
+  necessary, Medusa can of course use them, if the host operating system
+  supports them.  For example, an image-conversion or fractal-generating
+  server might be CPU-intensive, rather than I/O-bound, and thus a good
+  candidate for running in a separate thread.
+
+<p>
+Another solution  (used by many  current  HTTP servers on Unix)  is to
+'pre-spawn' a large number of processes - clients are attached to each
+server  in  turn.  Although  this  alleviates  the performance problem
+<i>up to that number  of users</i>, it still  does not scale well.  To
+reliably and efficiently handle <i>[n]</i> users, <i>[n]</i> processes
+are still necessary.
+
+<h3> Other Advantages </h3>
+  <ul>
+    <li> <b>Extensibility</b>
+    <p>
+
+      Since Medusa is written in Python, it  is easily extensible.  No
+      separate compilation is necessary.  New facilities can be loaded
+      and  unloaded into  the   server without  any  recompilation  or
+      linking, even while the server is running.  [For example, Medusa
+      can be configured to automatically upgrade  itself to the latest
+      version every so often].
+
+      <p>
+    <li> <b> Security </b>
+      <p>
+      
+      Many  of the  most popular  security holes  (popular, at  least,
+      among the mischievous) exploit the fact that servers are usually
+      written in a low-level language.  Unless such languages are used
+      with extreme care,  weaknesses  can be introduced that  are very
+      difficult    to  predict  and    control.  One  of  the favorite
+      loop-holes is the 'memory buffer overflow', used by the Internet
+      Worm (and many others)   to gain unwarranted access to  Internet
+      servers.
+  
+  </ul>
+    <p>
+  
+      Such  problems  are  virtually non-existent  when   working in a
+      high-level language like Python, where for example all access to
+      variables and their components is checked at run-time for valid
+      range operations.   Even unforeseen errors  and operating  system
+      bugs can  be caught -  Python includes a full exception-handling
+      system  which  promotes the  construction of  'highly available'
+      servers.  Rather  than crashing  the entire server,  Medusa will
+      usually inform the user, log the error, and keep right on running.
+
+<h2> Current Features </h2>
+
+<ul>
+    <li>  <p>  The  currently  available version  of   Medusa includes
+    integrated World   Wide   Web  (<b>HTTP</b>)  and  file   transfer
+    (<b>FTP</b>)  servers.   This combined server    can solve a major
+    performance  problem at any    high-load  site, by replacing   two
+    forking servers  with a single  non-forking, non-threading server.
+    Multiple servers of each type can also be instantiated. <p>
+
+    <li> <p> Also  included is  a secure 'remote-control'  capability,
+    called  a <b>monitor</b>  server.    With   this server   enabled,
+    authorized users can 'log in' to the  running server, and control,
+    manipulate, and examine   the server  <i>   while it is    running
+    </i>. <p>
+
+    <li> <p> A 'chat server' is included, as a sample server
+    implementation.  It's simple enough to serve as a good
+    introduction to extending Medusa.  It implements a simple IRC-like
+    chat service that could easily be integrated with the HTTP server
+    for an integrated web-oriented chat service.  [For example, a
+    small Java applet could be used on the client end to communicate
+    with the server].
+    <p>
+
+    <li> <p> Several extensions are available for the HTTP server, and
+    more will become available over time.  Each of these extensions can
+    be loaded/unloaded into the server dynamically.<p>
+        
+    <dl>
+    
+        <dt> <b> Status Extension </b> <dd> Provides status
+        information via the HTTP server.  Can report on any or all of
+        the installed servers, and on the extensions loaded into the
+        HTTP server.  [If this server is running Medusa, you should be
+        able to see it <a href="/status">here</a>]
+    
+        <dt> <b> Default Extension </b> <dd> Provides the 'standard'
+        file-delivery http server behavior.  Uses the same abstract
+        filesystem object as the FTP server.  Supports the HTTP/1.1
+        persistent connection via the 'Connection: Keep-Alive' header.
+
+	<dt> <b> HTTP Proxy Extension </b> <dd> Act as a proxy server for HTTP
+        requests.  This lets Medusa  be  used as a 'Firewall'  server.
+        Plans for this  extension include cache support, filtering (to
+        ignore,           say,      all            images         from
+        'http://obnoxious.blinky.advertisements.com/'),       logging,
+        etc...
+  
+        <dt> <b> Planned </b> <dd> On the drawing board are pseudo-filesystem
+        extensions, access to databases like mSQL and Oracle, (and on Windows
+        via ODBC), authentication, server-side includes, and a full-blown
+	proxy/cache system for both HTTP and FTP.  Feedback from users will
+	help me decide which areas to concentrate on, so please email me any
+        suggestions.
+
+    </dl>
+    
+    <p> <li> An API is evolving for users to  extend not just the HTTP
+    server but Medusa as a whole, mixing in other server types and new
+    capabilities  into existing  servers.  NNTP and  POP3 servers have
+    already been written, and will probably  be provided as an add-on.
+    I am actively encouraging other developers to produce (and if they
+    wish, to market) Medusa extensions.
+
+</ul>
+
+<h2> Where Can I Get It? </h2>
+
+<p>
+Medusa is available from <a
+href="http://www.nightmare.com/medusa/">http://www.nightmare.com/medusa</a>
+<p> Feedback, both positive and negative, is much appreciated; please send
+email to <a
+href="mailto:rushing@nightmare.com">rushing@nightmare.com</a>.
+
+</body>
+</html>

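The single select() loop the page describes is what Python's standard asyncore module provides, and everything in Medusa is built on it.  A toy, self-contained illustration of the model (not part of Medusa, port number made up, and deliberately naive about short send()s):

    import asyncore
    import socket

    class echo_channel (asyncore.dispatcher):
        # one small object per client -- no fork, no thread
        def handle_read (self):
            self.send (self.recv (512))

    class echo_server (asyncore.dispatcher):
        def __init__ (self, port):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (('', port))
            self.listen (5)

        def handle_accept (self):
            conn, addr = self.accept()
            echo_channel (conn)

    echo_server (7007)
    asyncore.loop()     # every connection multiplexed in one process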

=== Zope/lib/python/ZServer/medusa/medusa_gif.py 1.7 => 1.8 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/medusa_gif.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,8 @@
+# -*- Mode: Python -*-
+
+# the medusa icon as a python source file.
+
+width = 97
+height = 61
+
+data = 'GIF89aa\000=\000\204\000\000\000\000\000\255\255\255\245\245\245ssskkkccc111)))\326\326\326!!!\316\316\316\300\300\300\204\204\000\224\224\224\214\214\214\200\200\200RRR\377\377\377JJJ\367\367\367BBB\347\347\347\000\204\000\020\020\020\265\265\265\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000!\371\004\001\000\000\021\000,\000\000\000\000a\000=\000\000\005\376`$\216di\236h\252\256l\353\276p,\317tm\337x\256\357|m\001@\240E\305\000\364\2164\206R)$\005\201\214\007r\012{X\255\312a\004\260\\>\026\3240\353)\224n\001W+X\334\373\231~\344.\303b\216\024\027x<\273\307\255G,rJiWN\014{S}k"?ti\013EdPQ\207G@_%\000\026yy\\\201\202\227\224<\221Fs$pOjWz\241<r@vO\236\231\233k\247M\2544\203F\177\235\236L#\247\256Z\270,\266BxJ[\276\256A]iE\304\305\262\273E\313\201\275i#\\\303\321\'h\203V\\\177\326\276\216\220P~\335\230_\264\013\342\275\344KF\233\360Q\212\352\246\000\367\274s\361\236\334\347T\341;\341\246\2202\177\3142\211`\242o\325@S\202\264\031\252\207\260\323\256\205\311\036\236\270\002\'\013\302\177\274H\010\324X\002\0176\212\037\376\321\360\032\226\207\244\2674(+^\202\346r\205J\0211\375\241Y#\256f\0127\315>\272\002\325\307g\012(\007\205\312#j\317(\012A\200\224.\241\003\346GS\247\033\245\344\264\366\015L\'PXQl]\266\263\243\232\260?\245\316\371\362\225\035\332\243J\273\332Q\263\357-D\241T\327\270\265\013W&\330\010u\371b\322IW0\214\261]\003\033Va\365Z#\207\213a\030k\2647\262\014p\354\024[n\321N\363\346\317\003\037P\000\235C\302\000\3228(\244\363YaA\005\022\255_\237@\260\000A\212\326\256qbp\321\332\266\011\334=T\023\010"!B\005\003A\010\224\020\220 H\002\337#\020 O\276E\357h\221\327\003\\\000b@v\004\351A.h\365\354\342B\002\011\257\025\\ \220\340\301\353\006\000\024\214\200pA\300\353\012\364\241k/\340\033C\202\003\000\310fZ\011\003V\240R\005\007\354\376\026A\000\000\360\'\202\177\024\004\210\003\000\305\215\360\000\000\015\220\240\332\203\027@\'\202\004\025VpA\000%\210x\321\206\032J\341\316\010\262\211H"l\333\341\200\200>"]P\002\212\011\010`\002\0066FP\200\001\'\024p]\004\027(8B\221\306]\000\201w>\002iB\001\007\340\260"v7J1\343(\257\020\251\243\011\242i\263\017\215\337\035\220\200\221\365m4d\015\016D\251\341iN\354\346Ng\253\200I\240\031\35609\245\2057\311I\302\2007t\231"&`\314\310\244\011e\226(\236\010w\212\300\234\011\012HX(\214\253\311@\001\233^\222pg{% \340\035\224&H\000\246\201\362\215`@\001"L\340\004\030\234\022\250\'\015(V:\302\235\030\240q\337\205\224\212h@\177\006\000\250\210\004\007\310\207\337\005\257-P\346\257\367]p\353\203\271\256:\203\236\211F\340\247\010\3329g\244\010\307*=A\000\203\260y\012\304s#\014\007D\207,N\007\304\265\027\021C\233\207%B\366[m\353\006\006\034j\360\306+\357\274a\204\000\000;'


=== Zope/lib/python/ZServer/medusa/mime_type_table.py 1.7 => 1.8 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/mime_type_table.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,113 @@
+# -*- Python -*-
+# Converted by ./convert_mime_type_table.py from:
+# /usr/src2/apache_1.2b6/conf/mime.types
+#
+content_type_map = \
+  {
+        'ai':  'application/postscript',
+       'aif':  'audio/x-aiff',
+      'aifc':  'audio/x-aiff',
+      'aiff':  'audio/x-aiff',
+        'au':  'audio/basic',
+       'avi':  'video/x-msvideo',
+     'bcpio':  'application/x-bcpio',
+       'bin':  'application/octet-stream',
+       'cdf':  'application/x-netcdf',
+     'class':  'application/octet-stream',
+      'cpio':  'application/x-cpio',
+       'cpt':  'application/mac-compactpro',
+       'csh':  'application/x-csh',
+       'dcr':  'application/x-director',
+       'dir':  'application/x-director',
+       'dms':  'application/octet-stream',
+       'doc':  'application/msword',
+       'dvi':  'application/x-dvi',
+       'dxr':  'application/x-director',
+       'eps':  'application/postscript',
+       'etx':  'text/x-setext',
+       'exe':  'application/octet-stream',
+       'gif':  'image/gif',
+      'gtar':  'application/x-gtar',
+        'gz':  'application/x-gzip',
+       'hdf':  'application/x-hdf',
+       'hqx':  'application/mac-binhex40',
+       'htm':  'text/html',
+      'html':  'text/html',
+       'ice':  'x-conference/x-cooltalk',
+       'ief':  'image/ief',
+       'jpe':  'image/jpeg',
+      'jpeg':  'image/jpeg',
+       'jpg':  'image/jpeg',
+       'kar':  'audio/midi',
+     'latex':  'application/x-latex',
+       'lha':  'application/octet-stream',
+       'lzh':  'application/octet-stream',
+       'man':  'application/x-troff-man',
+        'me':  'application/x-troff-me',
+       'mid':  'audio/midi',
+      'midi':  'audio/midi',
+       'mif':  'application/x-mif',
+       'mov':  'video/quicktime',
+     'movie':  'video/x-sgi-movie',
+       'mp2':  'audio/mpeg',
+       'mpe':  'video/mpeg',
+      'mpeg':  'video/mpeg',
+       'mpg':  'video/mpeg',
+      'mpga':  'audio/mpeg',
+       'mp3':  'audio/mpeg',
+        'ms':  'application/x-troff-ms',
+        'nc':  'application/x-netcdf',
+       'oda':  'application/oda',
+       'pbm':  'image/x-portable-bitmap',
+       'pdb':  'chemical/x-pdb',
+       'pdf':  'application/pdf',
+       'pgm':  'image/x-portable-graymap',
+       'png':  'image/png',
+       'pnm':  'image/x-portable-anymap',
+       'ppm':  'image/x-portable-pixmap',
+       'ppt':  'application/powerpoint',
+        'ps':  'application/postscript',
+        'qt':  'video/quicktime',
+        'ra':  'audio/x-realaudio',
+       'ram':  'audio/x-pn-realaudio',
+       'ras':  'image/x-cmu-raster',
+       'rgb':  'image/x-rgb',
+      'roff':  'application/x-troff',
+       'rpm':  'audio/x-pn-realaudio-plugin',
+       'rtf':  'application/rtf',
+       'rtx':  'text/richtext',
+       'sgm':  'text/x-sgml',
+      'sgml':  'text/x-sgml',
+        'sh':  'application/x-sh',
+      'shar':  'application/x-shar',
+       'sit':  'application/x-stuffit',
+       'skd':  'application/x-koan',
+       'skm':  'application/x-koan',
+       'skp':  'application/x-koan',
+       'skt':  'application/x-koan',
+       'snd':  'audio/basic',
+       'src':  'application/x-wais-source',
+   'sv4cpio':  'application/x-sv4cpio',
+    'sv4crc':  'application/x-sv4crc',
+         't':  'application/x-troff',
+       'tar':  'application/x-tar',
+       'tcl':  'application/x-tcl',
+       'tex':  'application/x-tex',
+      'texi':  'application/x-texinfo',
+   'texinfo':  'application/x-texinfo',
+       'tif':  'image/tiff',
+      'tiff':  'image/tiff',
+        'tr':  'application/x-troff',
+       'tsv':  'text/tab-separated-values',
+       'txt':  'text/plain',
+     'ustar':  'application/x-ustar',
+       'vcd':  'application/x-cdlink',
+      'vrml':  'x-world/x-vrml',
+       'wav':  'audio/x-wav',
+       'wrl':  'x-world/x-vrml',
+       'xbm':  'image/x-xbitmap',
+       'xpm':  'image/x-xpixmap',
+       'xwd':  'image/x-xwindowdump',
+       'xyz':  'chemical/x-pdb',
+       'zip':  'application/zip',
+  }

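The table is keyed by bare filename extension, so a lookup helper has to strip the leading dot itself.  A minimal sketch (the helper name and the fallback type are assumptions, not part of this module):

    import os
    import string
    import mime_type_table

    def guess_type (path, default='application/octet-stream'):
        # 'foo/bar.HTML' -> 'html' -> 'text/html'
        ext = string.lower (os.path.splitext (path)[1][1:])
        return mime_type_table.content_type_map.get (ext, default)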

=== Zope/lib/python/ZServer/medusa/monitor.py 1.14 => 1.15 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/monitor.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,353 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#	Author: Sam Rushing <rushing@nightmare.com>
+
+#
+# python REPL channel.
+#
+
+RCS_ID = '$Id$'
+
+import md5
+import socket
+import string
+import sys
+import time
+
+if RCS_ID.startswith('$Id: '):
+    VERSION = string.split(RCS_ID)[2]
+else:
+    VERSION = '0.0'
+
+import asyncore
+import asynchat
+
+from counter import counter
+import producers
+
+class monitor_channel (asynchat.async_chat):
+    try_linemode = 1
+    
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        # local bindings specific to this channel
+        self.local_env = sys.modules['__main__'].__dict__.copy()
+        self.push ('Python ' + sys.version + '\r\n')
+        self.push (sys.copyright+'\r\n')
+        self.push ('Welcome to %s\r\n' % self)
+        self.push ("[Hint: try 'from __main__ import *']\r\n")
+        self.prompt()
+        self.number = server.total_sessions.as_long()
+        self.line_counter = counter()
+        self.multi_line = []
+        
+    def handle_connect (self):
+            # send IAC DO LINEMODE
+        self.push ('\377\375\"')
+        
+    def close (self):
+        self.server.closed_sessions.increment()
+        asynchat.async_chat.close(self)
+        
+    def prompt (self):
+        self.push ('>>> ')
+        
+    def collect_incoming_data (self, data):
+        self.data = self.data + data
+        if len(self.data) > 1024:
+                # denial of service.
+            self.push ('BCNU\r\n')
+            self.close_when_done()
+            
+    def found_terminator (self):
+        line = self.clean_line (self.data)
+        self.data = ''
+        self.line_counter.increment()
+        # check for special case inputs...
+        if not line and not self.multi_line:
+            self.prompt()
+            return
+        if line in ['\004', 'exit']:
+            self.push ('BCNU\r\n')
+            self.close_when_done()
+            return
+        oldout = sys.stdout
+        olderr = sys.stderr
+        try:
+            p = output_producer(self, olderr)
+            sys.stdout = p
+            sys.stderr = p
+            try:
+                    # this is, of course, a blocking operation.
+                    # if you wanted to thread this, you would have
+                    # to synchronize, etc... and treat the output
+                    # like a pipe.  Not Fun.
+                    #
+                    # try eval first.  If that fails, try exec.  If that fails,
+                    # hurl.
+                try:
+                    if self.multi_line:
+                            # oh, this is horrible...
+                        raise SyntaxError
+                    co = compile (line, repr(self), 'eval')
+                    result = eval (co, self.local_env)
+                    method = 'eval'
+                    if result is not None:
+                        print repr(result)
+                    self.local_env['_'] = result
+                except SyntaxError:
+                    try:
+                        if self.multi_line:
+                            if line and line[0] in [' ','\t']:
+                                self.multi_line.append (line)
+                                self.push ('... ')
+                                return
+                            else:
+                                self.multi_line.append (line)
+                                line =	string.join (self.multi_line, '\n')
+                                co = compile (line, repr(self), 'exec')
+                                self.multi_line = []
+                        else:
+                            co = compile (line, repr(self), 'exec')
+                    except SyntaxError, why:
+                        if why[0] == 'unexpected EOF while parsing':
+                            self.push ('... ')
+                            self.multi_line.append (line)
+                            return
+                        else:
+                            t,v,tb = sys.exc_info()
+                            del tb
+                            raise t,v
+                    exec co in self.local_env
+                    method = 'exec'
+            except:
+                method = 'exception'
+                self.multi_line = []
+                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
+                self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
+        finally:
+            sys.stdout = oldout
+            sys.stderr = olderr
+        self.log_info('%s:%s (%s)> %s' % (
+                self.number,
+                self.line_counter,
+                method,
+                repr(line))
+                )
+        self.push_with_producer (p)
+        self.prompt()
+        
+        # for now, we ignore any telnet option stuff sent to
+        # us, and we process the backspace key ourselves.
+        # gee, it would be fun to write a full-blown line-editing
+        # environment, etc...
+    def clean_line (self, line):
+        chars = []
+        for ch in line:
+            oc = ord(ch)
+            if oc < 127:
+                if oc in [8,177]:
+                        # backspace
+                    chars = chars[:-1]
+                else:
+                    chars.append (ch)
+        return string.join (chars, '')
+        
+class monitor_server (asyncore.dispatcher):
+
+    SERVER_IDENT = 'Monitor Server (V%s)' % VERSION
+    
+    channel_class = monitor_channel
+    
+    def __init__ (self, hostname='127.0.0.1', port=8023):
+        self.hostname = hostname
+        self.port = port
+        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind ((hostname, port))
+        self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
+        self.listen (5)
+        self.closed		= 0
+        self.failed_auths = 0
+        self.total_sessions = counter()
+        self.closed_sessions = counter()
+        
+    def writable (self):
+        return 0
+        
+    def handle_accept (self):
+        conn, addr = self.accept()
+        self.log_info('Incoming monitor connection from %s:%d' % addr)
+        self.channel_class (self, conn, addr)
+        self.total_sessions.increment()
+        
+    def status (self):
+        return producers.simple_producer (
+                '<h2>%s</h2>'						% self.SERVER_IDENT
+                + '<br><b>Total Sessions:</b> %s'		% self.total_sessions
+                + '<br><b>Current Sessions:</b> %d'	% (
+                        self.total_sessions.as_long()-self.closed_sessions.as_long()
+                        )
+                )
+        
+def hex_digest (s):
+    m = md5.md5()
+    m.update (s)
+    return string.joinfields (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
+    
+class secure_monitor_channel (monitor_channel):
+    authorized = 0
+    
+    def __init__ (self, server, sock, addr):
+        asynchat.async_chat.__init__ (self, sock)
+        self.server = server
+        self.addr = addr
+        self.set_terminator ('\r\n')
+        self.data = ''
+        # local bindings specific to this channel
+        self.local_env = {}
+        # send timestamp string
+        self.timestamp = str(time.time())
+        self.count = 0
+        self.line_counter = counter()
+        self.number = int(server.total_sessions.as_long())
+        self.multi_line = []
+        self.push (self.timestamp + '\r\n')
+        
+    def found_terminator (self):
+        if not self.authorized:
+            if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
+                self.log_info ('%s: failed authorization' % self, 'warning')
+                self.server.failed_auths = self.server.failed_auths + 1
+                self.close()
+            else:
+                self.authorized = 1
+                self.push ('Python ' + sys.version + '\r\n')
+                self.push (sys.copyright+'\r\n')
+                self.push ('Welcome to %s\r\n' % self)
+                self.prompt()
+                self.data = ''
+        else:
+            monitor_channel.found_terminator (self)
+            
+class secure_encrypted_monitor_channel (secure_monitor_channel):
+    "Wrap send() and recv() with a stream cipher"
+    
+    def __init__ (self, server, conn, addr):
+        key = server.password
+        self.outgoing = server.cipher.new (key)
+        self.incoming = server.cipher.new (key)
+        secure_monitor_channel.__init__ (self, server, conn, addr)
+        
+    def send (self, data):
+            # send the encrypted data instead
+        ed = self.outgoing.encrypt (data)
+        return secure_monitor_channel.send (self, ed)
+        
+    def recv (self, block_size):
+        data = secure_monitor_channel.recv (self, block_size)
+        if data:
+            dd = self.incoming.decrypt (data)
+            return dd
+        else:
+            return data
+            
+class secure_monitor_server (monitor_server):
+    channel_class = secure_monitor_channel
+    
+    def __init__ (self, password, hostname='', port=8023):
+        monitor_server.__init__ (self, hostname, port)
+        self.password = password
+        
+    def status (self):
+        p = monitor_server.status (self)
+        # kludge
+        p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
+        return p
+        
+        # don't try to print from within any of the methods
+        # of this object. 8^)
+        
+class output_producer:
+    def __init__ (self, channel, real_stderr):
+        self.channel = channel
+        self.data = ''
+        # use _this_ for debug output
+        self.stderr = real_stderr
+        
+    def check_data (self):
+        if len(self.data) > 1<<16:
+                # runaway output, close it.
+            self.channel.close()
+            
+    def write (self, data):
+        lines = string.splitfields (data, '\n')
+        data = string.join (lines, '\r\n')
+        self.data = self.data + data
+        self.check_data()
+        
+    def writeline (self, line):
+        self.data = self.data + line + '\r\n'
+        self.check_data()
+        
+    def writelines (self, lines):
+        self.data = self.data + string.joinfields (
+                lines,
+                '\r\n'
+                ) + '\r\n'
+        self.check_data()
+        
+    def ready (self):
+        return (len (self.data) > 0)
+        
+    def flush (self):
+        pass
+        
+    def softspace (self, *args):
+        pass
+        
+    def more (self):
+        if self.data:
+            result = self.data[:512]
+            self.data = self.data[512:]
+            return result
+        else:
+            return ''
+            
+if __name__ == '__main__':
+    import string
+    import sys
+    if '-s' in sys.argv:
+        sys.argv.remove ('-s')
+        print 'Enter password: ',
+        password = raw_input()
+    else:
+        password = None
+        
+    if '-e' in sys.argv:
+        sys.argv.remove ('-e')
+        encrypt = 1
+    else:
+        encrypt = 0
+        
+    if len(sys.argv) > 1:
+        port = string.atoi (sys.argv[1])
+    else:
+        port = 8023
+        
+    if password is not None:
+        s = secure_monitor_server (password, '', port)
+        if encrypt:
+            s.channel_class = secure_encrypted_monitor_channel
+            import sapphire
+            s.cipher = sapphire
+    else:
+        s = monitor_server ('', port)
+        
+    asyncore.loop(use_poll=1)

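The secure channel's handshake is a simple challenge/response: the server pushes str(time.time()) as its first line, and the client must answer with the hex MD5 of timestamp+password before anything is evaluated.  The reply can be computed with the module's own helper (the timestamp and password here are made up):

    import monitor

    timestamp = '1048023456.78'        # first line pushed by the server
    reply = monitor.hex_digest (timestamp + 'my-password') + '\r\n'

Note that hex_digest() builds the digest with hex(ord(x))[2:], which drops leading zeros on bytes below 16; since both ends use the same helper the comparison still matches, but the result is not interchangeable with a standard 32-character hex digest.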

=== Zope/lib/python/ZServer/medusa/monitor_client.py 1.9 => 1.10 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/monitor_client.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,126 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# monitor client, unix version.
+
+import asyncore
+import asynchat
+import regsub
+import socket
+import string
+import sys
+import os
+
+import md5
+import time
+
+class stdin_channel (asyncore.file_dispatcher):
+    def handle_read (self):
+        data = self.recv(512)
+        if not data:
+            print '\nclosed.'
+            self.sock_channel.close()
+            try:
+                self.close()
+            except:
+                pass
+            return          # the channel is closed; nothing left to push
+
+        data = regsub.gsub ('\n', '\r\n', data)
+        self.sock_channel.push (data)
+        
+    def writable (self):
+        return 0
+        
+    def log (self, *ignore):
+        pass
+        
+class monitor_client (asynchat.async_chat):
+    def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
+        asynchat.async_chat.__init__ (self)
+        self.create_socket (socket_type, socket.SOCK_STREAM)
+        self.terminator = '\r\n'
+        self.connect (addr)
+        self.sent_auth = 0
+        self.timestamp = ''
+        self.password = password
+        
+    def collect_incoming_data (self, data):
+        if not self.sent_auth:
+            self.timestamp = self.timestamp + data
+        else:
+            sys.stdout.write (data)
+            sys.stdout.flush()
+            
+    def found_terminator (self):
+        if not self.sent_auth:
+            self.push (hex_digest (self.timestamp + self.password) + '\r\n')
+            self.sent_auth = 1
+        else:
+            print
+            
+    def handle_close (self):
+            # close all the channels, which will make the standard main
+            # loop exit.
+        map (lambda x: x.close(), asyncore.socket_map.values())
+        
+    def log (self, *ignore):
+        pass
+        
+class encrypted_monitor_client (monitor_client):
+    "Wrap push() and recv() with a stream cipher"
+    
+    def init_cipher (self, cipher, key):
+        self.outgoing = cipher.new (key)
+        self.incoming = cipher.new (key)
+        
+    def push (self, data):
+            # push the encrypted data instead
+        return monitor_client.push (self, self.outgoing.encrypt (data))
+        
+    def recv (self, block_size):
+        data = monitor_client.recv (self, block_size)
+        if data:
+            return self.incoming.decrypt (data)
+        else:
+            return data
+            
+def hex_digest (s):
+    m = md5.md5()
+    m.update (s)
+    return string.join (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
+    
+if __name__ == '__main__':
+    if len(sys.argv) == 1:
+        print 'Usage: %s host port' % sys.argv[0]
+        sys.exit(0)
+        
+    if ('-e' in sys.argv):
+        encrypt = 1
+        sys.argv.remove ('-e')
+    else:
+        encrypt = 0
+        
+    sys.stderr.write ('Enter Password: ')
+    sys.stderr.flush()
+    try:
+        os.system ('stty -echo')
+        p = raw_input()
+        print
+    finally:
+        os.system ('stty echo')
+    stdin = stdin_channel (0)
+    if len(sys.argv) > 1:
+        if encrypt:
+            client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
+            import sapphire
+            client.init_cipher (sapphire, p)
+        else:
+            client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
+    else:
+            # default to local host, 'standard' port
+        client = monitor_client (p)
+    stdin.sock_channel = client
+    asyncore.loop()


=== Zope/lib/python/ZServer/medusa/monitor_client_win32.py 1.8 => 1.9 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/monitor_client_win32.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,53 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+# monitor client, win32 version
+
+# since we can't do select() on stdin/stdout, we simply
+# use threads and blocking sockets.  <sigh>
+
+import regsub
+import socket
+import string
+import sys
+import thread
+import md5
+
+def hex_digest (s):
+    m = md5.md5()
+    m.update (s)
+    return string.join (
+            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
+            '',
+            )
+    
+def reader (lock, sock, password):
+        # first grab the timestamp
+    ts = sock.recv (1024)[:-2]
+    sock.send (hex_digest (ts+password) + '\r\n')
+    while 1:
+        d = sock.recv (1024)
+        if not d:
+            lock.release()
+            print 'Connection closed.  Hit <return> to exit'
+            thread.exit()
+        sys.stdout.write (d)
+        sys.stdout.flush()
+        
+def writer (lock, sock, barrel="just kidding"):
+    while lock.locked():
+        sock.send (
+                sys.stdin.readline()[:-1] + '\r\n'
+                )
+        
+if __name__ == '__main__':
+    if len(sys.argv) == 1:
+        print 'Usage: %s host port' % sys.argv[0]
+        sys.exit(0)
+    print 'Enter Password: ',
+    p = raw_input()
+    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
+    s.connect ((sys.argv[1], string.atoi(sys.argv[2])))
+    l = thread.allocate_lock()
+    l.acquire()
+    thread.start_new_thread (reader, (l, s, p))
+    writer (l, s)


=== Zope/lib/python/ZServer/medusa/producers.py 1.11 => 1.12 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/producers.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,331 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+RCS_ID = '$Id$'
+
+import string
+
+"""
+A collection of producers.
+Each producer implements a particular feature:  They can be combined
+in various ways to get interesting and useful behaviors.
+
+For example, you can feed dynamically-produced output into the compressing
+producer, then wrap this with the 'chunked' transfer-encoding producer.
+"""
+
+class simple_producer:
+    "producer for a string"
+    def __init__ (self, data, buffer_size=1024):
+        self.data = data
+        self.buffer_size = buffer_size
+        
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+            
+class scanning_producer:
+    "like simple_producer, but more efficient for large strings"
+    def __init__ (self, data, buffer_size=1024):
+        self.data = data
+        self.buffer_size = buffer_size
+        self.pos = 0
+        
+    def more (self):
+        if self.pos < len(self.data):
+            lp = self.pos
+            rp = min (
+                    len(self.data),
+                    self.pos + self.buffer_size
+                    )
+            result = self.data[lp:rp]
+            self.pos = self.pos + len(result)
+            return result
+        else:
+            return ''
+            
+class lines_producer:
+    "producer for a list of lines"
+    
+    def __init__ (self, lines):
+        self.lines = lines
+        
+    def ready (self):
+        return len(self.lines)
+        
+    def more (self):
+        if self.lines:
+            chunk = self.lines[:50]
+            self.lines = self.lines[50:]
+            return string.join (chunk, '\r\n') + '\r\n'
+        else:
+            return ''
+            
+class buffer_list_producer:
+    "producer for a list of buffers"
+    
+    # i.e., data == string.join (buffers, '')
+    
+    def __init__ (self, buffers):
+    
+        self.index = 0
+        self.buffers = buffers
+        
+    def more (self):
+        if self.index >= len(self.buffers):
+            return ''
+        else:
+            data = self.buffers[self.index]
+            self.index = self.index + 1
+            return data
+            
+class file_producer:
+    "producer wrapper for file[-like] objects"
+    
+    # match http_channel's outgoing buffer size
+    out_buffer_size = 1<<16
+    
+    def __init__ (self, file):
+        self.done = 0
+        self.file = file
+        
+    def more (self):
+        if self.done:
+            return ''
+        else:
+            data = self.file.read (self.out_buffer_size)
+            if not data:
+                self.file.close()
+                del self.file
+                self.done = 1
+                return ''
+            else:
+                return data
+                
+                # A simple output producer.  This one does not [yet] have
+                # the safety feature builtin to the monitor channel:  runaway
+                # output will not be caught.
+                
+                # don't try to print from within any of the methods
+                # of this object.
+                
+class output_producer:
+    "Acts like an output file; suitable for capturing sys.stdout"
+    def __init__ (self):
+        self.data = ''
+        
+    def write (self, data):
+        lines = string.splitfields (data, '\n')
+        data = string.join (lines, '\r\n')
+        self.data = self.data + data
+        
+    def writeline (self, line):
+        self.data = self.data + line + '\r\n'
+        
+    def writelines (self, lines):
+        self.data = self.data + string.joinfields (
+                lines,
+                '\r\n'
+                ) + '\r\n'
+        
+    def ready (self):
+        return (len (self.data) > 0)
+        
+    def flush (self):
+        pass
+        
+    def softspace (self, *args):
+        pass
+        
+    def more (self):
+        if self.data:
+            result = self.data[:512]
+            self.data = self.data[512:]
+            return result
+        else:
+            return ''
+            
+class composite_producer:
+    "combine a fifo of producers into one"
+    def __init__ (self, producers):
+        self.producers = producers
+        
+    def more (self):
+        while len(self.producers):
+            p = self.producers.first()
+            d = p.more()
+            if d:
+                return d
+            else:
+                self.producers.pop()
+        # all producers are exhausted
+        return ''
+            
+            
+class globbing_producer:
+    """
+    'glob' the output from a producer into a particular buffer size.
+    helps reduce the number of calls to send().  [this appears to
+    gain about 30% performance on requests to a single channel]
+    """
+    
+    def __init__ (self, producer, buffer_size=1<<16):
+        self.producer = producer
+        self.buffer = ''
+        self.buffer_size = buffer_size
+        
+    def more (self):
+        while len(self.buffer) < self.buffer_size:
+            data = self.producer.more()
+            if data:
+                self.buffer = self.buffer + data
+            else:
+                break
+        r = self.buffer
+        self.buffer = ''
+        return r
+        
+        
+class hooked_producer:
+    """
+    A producer that will call <function> when it empties,
+    with an argument of the number of bytes produced.  Useful
+    for logging/instrumentation purposes.
+    """
+    
+    def __init__ (self, producer, function):
+        self.producer = producer
+        self.function = function
+        self.bytes = 0
+        
+    def more (self):
+        if self.producer:
+            result = self.producer.more()
+            if not result:
+                self.producer = None
+                self.function (self.bytes)
+            else:
+                self.bytes = self.bytes + len(result)
+            return result
+        else:
+            return ''
+            
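+# A hypothetical usage sketch: log how many bytes a request actually
+# produced, once the wrapped producer runs dry:
+#
+#   def note_bytes (n):
+#       print 'produced %d bytes' % n
+#
+#   request.push (hooked_producer (file_producer (f), note_bytes))
+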
+# HTTP/1.1 emphasizes that an advertised Content-Length header MUST be
+# correct.  In the face of Strange Files, it is conceivable that
+# reading a 'file' may produce an amount of data not matching that
+# reported by os.stat() [text/binary mode issues, perhaps the file is
+# being appended to, etc..]  This makes the chunked encoding a True
+# Blessing, and it really ought to be used even with normal files.
+# How beautifully it blends with the concept of the producer.
+            
+class chunked_producer:
+    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
+    Here is a sample usage:
+            request['Transfer-Encoding'] = 'chunked'
+            request.push (
+                    producers.chunked_producer (your_producer)
+                    )
+            request.done()
+    """
+    
+    def __init__ (self, producer, footers=None):
+        self.producer = producer
+        self.footers = footers
+        
+    def more (self):
+        if self.producer:
+            data = self.producer.more()
+            if data:
+                return '%x\r\n%s\r\n' % (len(data), data)
+            else:
+                self.producer = None
+                if self.footers:
+                    return string.join (
+                            ['0'] + self.footers,
+                            '\r\n'
+                            ) + '\r\n\r\n'
+                else:
+                    return '0\r\n\r\n'
+        else:
+            return ''
+            
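+# On the wire, each chunk is its length in hex, CRLF, the data, CRLF;
+# a zero-length chunk ends the body.  For example (an illustrative
+# trace, not generated by this module), the body 'hello' sent as a
+# single chunk is:
+#
+#   '5\r\nhello\r\n'  followed by  '0\r\n\r\n'
+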
+# Unfortunately this isn't very useful right now (Aug 97), because
+# apparently the browsers don't do on-the-fly decompression.  Which
+# is sad, because this could _really_ speed things up, especially for
+# low-bandwidth clients (i.e., most everyone).
+            
+try:
+    import zlib
+except ImportError:
+    zlib = None
+    
+class compressed_producer:
+    """
+    Compress another producer on-the-fly, using ZLIB
+    [Unfortunately, none of the current browsers seem to support this]
+    """
+    
+    # Note: It's not very efficient to have the server repeatedly
+    # compressing your outgoing files: compress them ahead of time, or
+    # use a compress-once-and-store scheme.  However, if you have low
+    # bandwidth and low traffic, this may make more sense than
+    # maintaining your source files compressed.
+    #
+    # Can also be used for compressing dynamically-produced output.
+    
+    def __init__ (self, producer, level=5):
+        self.producer = producer
+        self.compressor = zlib.compressobj (level)
+        
+    def more (self):
+        if self.producer:
+            cdata = ''
+            # feed until we get some output
+            while not cdata:
+                data = self.producer.more()
+                if not data:
+                    self.producer = None
+                    return self.compressor.flush()
+                else:
+                    cdata = self.compressor.compress (data)
+            return cdata
+        else:
+            return ''
+            
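+# A hypothetical usage sketch: zlib.compressobj emits a zlib-format
+# stream, which corresponds to the 'deflate' content-coding:
+#
+#   request['Content-Encoding'] = 'deflate'
+#   request.push (compressed_producer (file_producer (f)))
+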
+class escaping_producer:
+
+    """A producer that escapes a sequence of characters.
+    Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc...
+    """
+    
+    def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
+        self.producer = producer
+        self.esc_from = esc_from
+        self.esc_to = esc_to
+        self.buffer = ''
+        from asynchat import find_prefix_at_end
+        self.find_prefix_at_end = find_prefix_at_end
+        
+    def more (self):
+        esc_from = self.esc_from
+        esc_to   = self.esc_to
+        
+        buffer = self.buffer + self.producer.more()
+        
+        if buffer:
+            buffer = string.replace (buffer, esc_from, esc_to)
+            i = self.find_prefix_at_end (buffer, esc_from)
+            if i:
+                    # we found a prefix
+                self.buffer = buffer[-i:]
+                return buffer[:-i]
+            else:
+                    # no prefix, return it all
+                self.buffer = ''
+                return buffer
+        else:
+            return buffer
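+
+# A hypothetical usage sketch: when pushing message text over SMTP,
+# double any leading dot so the end-of-data marker (CRLF.CRLF) cannot
+# occur inside the body:
+#
+#   channel.push_with_producer (
+#           escaping_producer (file_producer (msg), '\r\n.', '\r\n..')
+#           )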


=== Zope/lib/python/ZServer/medusa/put_handler.py 1.4 => 1.5 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/put_handler.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,115 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1996-2000 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID =  '$Id$'
+
+import re
+import string
+
+import default_handler
+unquote		= default_handler.unquote
+get_header	= default_handler.get_header
+
+last_request = None
+
+class put_handler:
+    def __init__ (self, filesystem, uri_regex):
+        self.filesystem = filesystem
+        if type (uri_regex) == type(''):
+            self.uri_regex = re.compile (uri_regex)
+        else:
+            self.uri_regex = uri_regex
+            
+    def match (self, request):
+        uri = request.uri
+        if request.command == 'put':
+            m = self.uri_regex.match (uri)
+            if m and m.end() == len(uri):
+                return 1
+        return 0
+        
+    def handle_request (self, request):
+    
+        path, params, query, fragment = request.split_uri()
+        
+        # strip off leading slashes
+        while path and path[0] == '/':
+            path = path[1:]
+            
+        if '%' in path:
+            path = unquote (path)
+            
+        # make sure there's a content-length header
+        cl = get_header (CONTENT_LENGTH, request.header)
+        if not cl:
+            request.error (411)
+            return
+        else:
+            cl = string.atoi (cl)
+            
+        # don't let them try to overwrite a directory
+        if self.filesystem.isdir (path):
+            request.error (405)
+            return
+            
+        is_update = self.filesystem.isfile (path)
+        
+        try:
+            output_file = self.filesystem.open (path, 'wb')
+        except:
+            request.error (405)
+            return
+            
+        request.collector = put_collector (output_file, cl, request, is_update)
+        
+        # no terminator while receiving PUT data
+        request.channel.set_terminator (None)
+        
+        # don't respond yet, wait until we've received the data...
+        
+class put_collector:
+    def __init__ (self, file, length, request, is_update):
+        self.file		= file
+        self.length		= length
+        self.request	= request
+        self.is_update	= is_update
+        self.bytes_in	= 0
+        
+    def collect_incoming_data (self, data):
+        ld = len(data)
+        bi = self.bytes_in
+        if (bi + ld) >= self.length:
+                # last bit of data
+            chunk = self.length - bi
+            self.file.write (data[:chunk])
+            self.file.close()
+            
+            if chunk != ld:
+                print 'orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:]))
+                
+                # do some housekeeping
+            r = self.request
+            ch = r.channel
+            ch.current_request = None
+            # set the terminator back to the default
+            ch.set_terminator ('\r\n\r\n')
+            if self.is_update:
+                r.reply_code = 204 # No content
+                r.done()
+            else:
+                r.reply_now (201) # Created
+                # avoid circular reference
+            del self.request
+        else:
+            self.file.write (data)
+            self.bytes_in = self.bytes_in + ld
+            
+    def found_terminator (self):
+            # shouldn't be called
+        pass
+        
+CONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)
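+
+# A hypothetical setup sketch (server, port and directory are
+# illustrative): accept PUTs below /upload/ into a filesystem rooted
+# at /var/spool/www:
+#
+#   import filesys, http_server
+#   fs = filesys.os_filesystem ('/var/spool/www')
+#   hs = http_server.http_server ('', 8080)
+#   hs.install_handler (put_handler (fs, '/upload/.*'))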


=== Zope/lib/python/ZServer/medusa/redirecting_handler.py 1.4 => 1.5 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/redirecting_handler.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,46 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#	Copyright 1996-2000 by Sam Rushing
+#						 All Rights Reserved.
+#
+
+RCS_ID =  '$Id$'
+
+import re
+import counter
+
+class redirecting_handler:
+
+    def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
+        self.pattern = pattern
+        self.redirect = redirect
+        self.patreg = re.compile (pattern, regex_flag)
+        self.hits = counter.counter()
+        
+    def match (self, request):
+        m = self.patreg.match (request.uri)
+        return (m and (m.end() == len(request.uri)))
+        
+    def handle_request (self, request):
+        self.hits.increment()
+        m = self.patreg.match (request.uri)
+        part = m.group(1)
+        
+        request['Location'] = self.redirect % part
+        request.error (302) # moved temporarily
+        
+    def __repr__ (self):
+        return '<Redirecting Handler at %08x [%s => %s]>' % (
+                id(self),
+                repr(self.pattern),
+                repr(self.redirect)
+                )
+        
+    def status (self):
+        import producers
+        return producers.simple_producer (
+                '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
+                        self.pattern, self.redirect, self.hits
+                        )
+                )
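+
+# A hypothetical usage sketch: bounce anything under /old/ to another
+# host, substituting the captured tail of the URI into the target:
+#
+#   rh = redirecting_handler ('/old/(.*)', 'http://www.example.com/%s')
+#   hs.install_handler (rh)    # hs: an existing http_server instance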


=== Zope/lib/python/ZServer/medusa/resolver.py 1.11 => 1.12 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/resolver.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,445 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+#
+#	Author: Sam Rushing <rushing@nightmare.com>
+#
+
+RCS_ID =  '$Id$'
+
+
+# Fast, low-overhead asynchronous name resolver.  uses 'pre-cooked'
+# DNS requests, unpacks only as much as it needs of the reply.
+
+# see rfc1035 for details
+
+import string
+import asyncore
+import socket
+import sys
+import time
+from counter import counter
+
+if RCS_ID.startswith('$Id: '):
+    VERSION = string.split(RCS_ID)[2]
+else:
+    VERSION = '0.0'
+
+# header
+#                                    1  1  1  1  1  1
+#      0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                      ID                       |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |QR|   Opcode  |AA|TC|RD|RA|   Z    |   RCODE   |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                    QDCOUNT                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                    ANCOUNT                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                    NSCOUNT                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                    ARCOUNT                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+
+# question
+#                                    1  1  1  1  1  1
+#      0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                                               |
+#    /                     QNAME                     /
+#    /                                               /
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                     QTYPE                     |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                     QCLASS                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+# build a DNS address request, _quickly_
+def fast_address_request (host, id=0):
+    return (
+            '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
+            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
+                    string.join (
+                            map (
+                                    lambda part: '%c%s' % (chr(len(part)),part),
+                                    string.split (host, '.')
+                                    ), ''
+                            )
+                    )
+            )
+    
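+# For illustration: the QNAME spliced in above for 'www.example.com'
+# is a sequence of length-prefixed labels,
+#
+#   '\003www\007example\003com'
+#
+# and the surrounding format string supplies the terminating zero
+# byte plus QTYPE=1 (A) and QCLASS=1 (IN).
+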
+def fast_ptr_request (host, id=0):
+    return (
+            '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))
+            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
+                    string.join (
+                            map (
+                                    lambda part: '%c%s' % (chr(len(part)),part),
+                                    string.split (host, '.')
+                                    ), ''
+                            )
+                    )
+            )
+    
+def unpack_name (r,pos):
+    n = []
+    while 1:
+        ll = ord(r[pos])
+        if (ll&0xc0):
+                # compression
+            pos = ((ll&0x3f) << 8) + ord(r[pos+1])
+        elif ll == 0:
+            break			
+        else:
+            pos = pos + 1
+            n.append (r[pos:pos+ll])
+            pos = pos + ll
+    return string.join (n,'.')
+    
+def skip_name (r,pos):
+    s = pos
+    while 1:
+        ll = ord(r[pos])
+        if (ll&0xc0):
+                # compression
+            return pos + 2
+        elif ll == 0:
+            pos = pos + 1
+            break
+        else:
+            pos = pos + ll + 1
+    return pos
+    
+def unpack_ttl (r,pos):
+    return reduce (
+            lambda x,y: (x<<8)|y,
+            map (ord, r[pos:pos+4])
+            )
+    
+# resource record
+#                                    1  1  1  1  1  1
+#      0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                                               |
+#    /                                               /
+#    /                      NAME                     /
+#    |                                               |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                      TYPE                     |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                     CLASS                     |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                      TTL                      |
+#    |                                               |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#    |                   RDLENGTH                    |
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+#    /                     RDATA                     /
+#    /                                               /
+#    +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+    
+def unpack_address_reply (r):
+    ancount = (ord(r[6])<<8) + (ord(r[7]))
+    # skip question, first name starts at 12,
+    # this is followed by QTYPE and QCLASS
+    pos = skip_name (r, 12) + 4
+    if ancount:
+            # we are looking very specifically for
+            # an answer with TYPE=A, CLASS=IN (\000\001\000\001)
+        for an in range(ancount):
+            pos = skip_name (r, pos)
+            if r[pos:pos+4] == '\000\001\000\001':
+                return (
+                        unpack_ttl (r,pos+4),
+                        '%d.%d.%d.%d' % tuple(map(ord,r[pos+10:pos+14]))
+                        )
+                # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
+            pos = pos + 8
+            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
+            pos = pos + 2 + rdlength
+        return 0, None
+    else:
+        return 0, None
+        
+def unpack_ptr_reply (r):
+    ancount = (ord(r[6])<<8) + (ord(r[7]))
+    # skip question, first name starts at 12,
+    # this is followed by QTYPE and QCLASS
+    pos = skip_name (r, 12) + 4
+    if ancount:
+            # we are looking very specifically for
+            # an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
+        for an in range(ancount):
+            pos = skip_name (r, pos)
+            if r[pos:pos+4] == '\000\014\000\001':
+                return (
+                        unpack_ttl (r,pos+4),
+                        unpack_name (r, pos+10)
+                        )
+                # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
+            pos = pos + 8
+            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
+            pos = pos + 2 + rdlength
+        return 0, None
+    else:
+        return 0, None
+        
+        
+# This is a UDP (datagram) resolver.
+
+#
+# It may be useful to implement a TCP resolver.  This would presumably
+# give us more reliable behavior when things get too busy.  A TCP
+# client would have to manage the connection carefully, since the
+# server is allowed to close it at will (the RFC recommends closing
+# after 2 minutes of idle time).
+#
+# Note also that the TCP client will have to prepend each request
+# with a 2-byte length indicator (see rfc1035).
+#
+        
+class resolver (asyncore.dispatcher):
+    id = counter()
+    def __init__ (self, server='127.0.0.1'):
+        asyncore.dispatcher.__init__ (self)
+        self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
+        self.server = server
+        self.request_map = {}
+        self.last_reap_time = int(time.time())      # reap every few minutes
+        
+    def writable (self):
+        return 0
+        
+    def log (self, *args):
+        pass
+        
+    def handle_close (self):
+        self.log_info('closing!')
+        self.close()
+        
+    def handle_error (self):      # don't close the connection on error
+        (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+        self.log_info(
+                        'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
+                        'error')
+        
+    def get_id (self):
+        return (self.id.as_long() % (1<<16))
+        
+    def reap (self):          # find DNS requests that have timed out
+        now = int(time.time())
+        if now - self.last_reap_time > 180:        # reap every 3 minutes
+            self.last_reap_time = now              # update before we forget
+            for k,(host,unpack,callback,when) in self.request_map.items():
+                if now - when > 180:               # over 3 minutes old
+                    del self.request_map[k]
+                    try:                           # same code as in handle_read
+                        callback (host, 0, None)   # timeout val is (0,None) 
+                    except:
+                        (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+                        self.log_info('%s %s %s' % (t,v,tbinfo), 'error')
+                        
+    def resolve (self, host, callback):
+        self.reap()                                # first, get rid of old guys
+        self.socket.sendto (
+                fast_address_request (host, self.get_id()),
+                (self.server, 53)
+                )
+        self.request_map [self.get_id()] = (
+                host, unpack_address_reply, callback, int(time.time()))
+        self.id.increment()
+        
+    def resolve_ptr (self, host, callback):
+        self.reap()                                # first, get rid of old guys
+        ip = string.split (host, '.')
+        ip.reverse()
+        ip = string.join (ip, '.') + '.in-addr.arpa'
+        self.socket.sendto (
+                fast_ptr_request (ip, self.get_id()),
+                (self.server, 53)
+                )
+        self.request_map [self.get_id()] = (
+                host, unpack_ptr_reply, callback, int(time.time()))
+        self.id.increment()
+        
+    def handle_read (self):
+        reply, whence = self.socket.recvfrom (512)
+        # for security reasons we may want to double-check
+        # that <whence> is the server we sent the request to.
+        id = (ord(reply[0])<<8) + ord(reply[1])
+        if self.request_map.has_key (id):
+            host, unpack, callback, when = self.request_map[id]
+            del self.request_map[id]
+            ttl, answer = unpack (reply)
+            try:
+                callback (host, ttl, answer)
+            except:
+                (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
+                self.log_info('%s %s %s' % ( t,v,tbinfo), 'error')
+                
+class rbl (resolver):
+
+    def resolve_maps (self, host, callback):
+        ip = string.split (host, '.')
+        ip.reverse()
+        ip = string.join (ip, '.') + '.rbl.maps.vix.com'
+        self.socket.sendto (
+                fast_ptr_request (ip, self.get_id()),
+                (self.server, 53)
+                )
+        # must match the 4-tuple shape unpacked in handle_read() and reap()
+        self.request_map [self.get_id()] = (
+                host, self.check_reply, callback, int(time.time()))
+        self.id.increment()
+        
+    def check_reply (self, r):
+            # we only need to check RCODE.
+        rcode = (ord(r[3])&0xf)
+        self.log_info('MAPS RBL; RCODE =%02x\n %s' % (rcode, repr(r)))
+        return 0, rcode # (ttl, answer)
+        
+        
+class hooked_callback:
+    def __init__ (self, hook, callback):
+        self.hook, self.callback = hook, callback
+        
+    def __call__ (self, *args):
+        apply (self.hook, args)
+        apply (self.callback, args)
+        
+class caching_resolver (resolver):
+    "Cache DNS queries.  Will need to honor the TTL value in the replies"
+    
+    def __init__ (*args):
+        apply (resolver.__init__, args)
+        self = args[0]
+        self.cache = {}
+        self.forward_requests = counter()
+        self.reverse_requests = counter()
+        self.cache_hits = counter()
+        
+    def resolve (self, host, callback):
+        self.forward_requests.increment()
+        if self.cache.has_key (host):
+            when, ttl, answer = self.cache[host]
+            # ignore TTL for now
+            callback (host, ttl, answer)
+            self.cache_hits.increment()
+        else:
+            resolver.resolve (
+                    self,
+                    host,
+                    hooked_callback (
+                            self.callback_hook,
+                            callback
+                            )
+                    )
+            
+    def resolve_ptr (self, host, callback):
+        self.reverse_requests.increment()
+        if self.cache.has_key (host):
+            when, ttl, answer = self.cache[host]
+            # ignore TTL for now
+            callback (host, ttl, answer)
+            self.cache_hits.increment()
+        else:
+            resolver.resolve_ptr (
+                    self,
+                    host,
+                    hooked_callback (
+                            self.callback_hook,
+                            callback
+                            )
+                    )
+            
+    def callback_hook (self, host, ttl, answer):
+        self.cache[host] = time.time(), ttl, answer
+        
+    SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION
+    
+    def status (self):
+        import status_handler
+        import producers
+        return producers.simple_producer (
+                '<h2>%s</h2>'					% self.SERVER_IDENT
+                + '<br>Server: %s'				% self.server
+                + '<br>Cache Entries: %d'		% len(self.cache)
+                + '<br>Outstanding Requests: %d' % len(self.request_map)
+                + '<br>Forward Requests: %s'	% self.forward_requests
+                + '<br>Reverse Requests: %s'	% self.reverse_requests
+                + '<br>Cache Hits: %s'			% self.cache_hits
+                )
+        
+#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""
+# def test_unpacker ():
+# 	print unpack_address_reply (test_reply)
+# 
+# import time
+# class timer:
+# 	def __init__ (self):
+# 		self.start = time.time()
+# 	def end (self):
+# 		return time.time() - self.start
+# 
+# # I get ~290 unpacks per second for the typical case, compared to ~48
+# # using dnslib directly.  also, that latter number does not include
+# # picking the actual data out.
+# 
+# def benchmark_unpacker():
+# 
+# 	r = range(1000)
+# 	t = timer()
+# 	for i in r:
+# 		unpack_address_reply (test_reply)
+# 	print '%.2f unpacks per second' % (1000.0 / t.end())
+        
+if __name__ == '__main__':
+    import sys
+    if len(sys.argv) == 1:
+        print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
+        sys.exit(0)
+    elif ('-s' in sys.argv):
+        i = sys.argv.index('-s')
+        server = sys.argv[i+1]
+        del sys.argv[i:i+2]
+    else:
+        server = '127.0.0.1'
+        
+    if ('-r' in sys.argv):
+        reverse = 1
+        i = sys.argv.index('-r')
+        del sys.argv[i]
+    else:
+        reverse = 0
+        
+    if ('-m' in sys.argv):
+        maps = 1
+        sys.argv.remove ('-m')
+    else:
+        maps = 0
+        
+    if maps:
+        r = rbl (server)
+    else:
+        r = caching_resolver(server)
+        
+    count = len(sys.argv) - 1
+    
+    def print_it (host, ttl, answer):
+        global count
+        print '%s: %s' % (host, answer)
+        count = count - 1
+        if not count:
+            r.close()
+            
+    for host in sys.argv[1:]:
+        if reverse:
+            r.resolve_ptr (host, print_it)
+        elif maps:
+            r.resolve_maps (host, print_it)
+        else:
+            r.resolve (host, print_it)
+            
+    # hooked asyncore.loop()
+    while asyncore.socket_map:
+        asyncore.poll (30.0)
+        print 'requests outstanding: %d' % len(r.request_map)


=== Zope/lib/python/ZServer/medusa/status_handler.py 1.8 => 1.9 ===
--- /dev/null	Tue Mar 18 16:15:49 2003
+++ Zope/lib/python/ZServer/medusa/status_handler.py	Tue Mar 18 16:15:17 2003
@@ -0,0 +1,282 @@
+# -*- Mode: Python; tab-width: 4 -*-
+
+VERSION_STRING = "$Id$"
+
+#			
+# medusa status extension
+#
+
+import string
+import time
+import re
+
+import asyncore
+import http_server
+import medusa_gif
+import producers
+from counter import counter
+
+START_TIME = long(time.time())
+
+class status_extension:
+    hit_counter = counter()
+    
+    def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
+        self.objects = objects
+        self.statusdir = statusdir
+        self.allow_emergency_debug = allow_emergency_debug
+        # We use /status instead of statusdir here because it's too
+        # hard to pass statusdir to the logger, who makes the HREF
+        # to the object dir.  We don't need the security-through-
+        # obscurity here in any case, because the id is obscurity enough
+        self.hyper_regex = re.compile('/status/object/([0-9]+)/.*')
+        self.hyper_objects = []
+        for object in objects:
+            self.register_hyper_object (object)
+            
+    def __repr__ (self):
+        return '<Status Extension (%s hits) at %x>' % (
+                self.hit_counter,
+                id(self)
+                )
+        
+    def match (self, request):
+        path, params, query, fragment = request.split_uri()
+        # For reasons explained above, we don't use statusdir for /object
+        return (path[:len(self.statusdir)] == self.statusdir or
+                        path[:len("/status/object/")] == '/status/object/')
+        
+        # Possible Targets:
+        # /status
+        # /status/channel_list
+        # /status/medusa.gif
+        
+        # can we have 'clickable' objects?
+        # [yes, we can use id(x) and do a linear search]
+        
+        # Dynamic producers:
+        # HTTP/1.0: we must close the channel, because it's dynamic output
+        # HTTP/1.1: we can use the chunked transfer-encoding, and leave
+        #   it open.
+        
+    def handle_request (self, request):
+        [path, params, query, fragment] = request.split_uri()
+        self.hit_counter.increment()
+        if path == self.statusdir:          # and not a subdirectory
+            up_time = string.join (english_time (long(time.time()) - START_TIME))
+            request['Content-Type'] = 'text/html'
+            request.push (
+                    '<html>'
+                    '<title>Medusa Status Reports</title>'
+                    '<body bgcolor="#ffffff">'
+                    '<h1>Medusa Status Reports</h1>'
+                    '<b>Up:</b> %s' % up_time
+                    )
+            for i in range(len(self.objects)):
+                request.push (self.objects[i].status())
+                request.push ('<hr>\r\n')
+            request.push (
+                    '<p><a href="%s/channel_list">Channel List</a>'
+                    '<hr>'
+                    '<img src="%s/medusa.gif" align=right width=%d height=%d>'
+                    '</body></html>' % (
+                            self.statusdir,
+                            self.statusdir,
+                            medusa_gif.width,
+                            medusa_gif.height
+                            )
+                    )
+            request.done()
+        elif path == self.statusdir + '/channel_list':
+            request['Content-Type'] = 'text/html'
+            request.push ('<html><body>')
+            request.push(channel_list_producer(self.statusdir))
+            request.push (
+                    '<hr>'
+                    '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
+                            self.statusdir,
+                            medusa_gif.width, 
+                            medusa_gif.height
+                            ) +
+                    '</body></html>'
+                    )
+            request.done()
+            
+        elif path == self.statusdir + '/medusa.gif':
+            request['Content-Type'] = 'image/gif'
+            request['Content-Length'] = len(medusa_gif.data)
+            request.push (medusa_gif.data)
+            request.done()
+            
+        elif path == self.statusdir + '/close_zombies':
+            message = (
+                    '<h2>Closing all zombie http client connections...</h2>'
+                    '<p><a href="%s">Back to the status page</a>' % self.statusdir
+                    )
+            request['Content-Type'] = 'text/html'
+            request['Content-Length'] = len (message)
+            request.push (message)
+            now = int (time.time())
+            for channel in asyncore.socket_map.values():
+                if channel.__class__ == http_server.http_channel:
+                    if channel != request.channel:
+                        if (now - channel.creation_time) > channel.zombie_timeout:
+                            channel.close()
+            request.done()
+            
+            # Emergency Debug Mode
+            # If a server is running away from you, don't KILL it!
+            # Move all the AF_INET server ports and perform an autopsy...
+            # [disabled by default to protect the innocent]
+        elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
+            request.push ('<html>Moving All Servers...</html>')
+            request.done()
+            for channel in asyncore.socket_map.values():
+                if channel.accepting:
+                    if type(channel.addr) is type(()):
+                        ip, port = channel.addr
+                        channel.socket.close()
+                        channel.del_channel()
+                        channel.addr = (ip, port+10000)
+                        fam, typ = channel.family_and_type
+                        channel.create_socket (fam, typ)
+                        channel.set_reuse_addr()
+                        channel.bind (channel.addr)
+                        channel.listen(5)
+                        
+        else:
+            m = self.hyper_regex.match (path)
+            if m:
+                oid = string.atoi (m.group (1))
+                for object in self.hyper_objects:
+                    if id (object) == oid:
+                        if hasattr (object, 'hyper_respond'):
+                            object.hyper_respond (self, path, request)
+            else:
+                request.error (404)
+                return
+                
+    def status (self):
+        return producers.simple_producer (
+                '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
+                )
+        
+    def register_hyper_object (self, object):
+        if not object in self.hyper_objects:
+            self.hyper_objects.append (object)
+            
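+# A hypothetical setup sketch: pass in the objects whose status()
+# output should appear on the report page, then install the extension
+# on the server:
+#
+#   st = status_extension ([hs, ftp])   # hs, ftp: existing servers
+#   hs.install_handler (st)
+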
+import logger
+
+class logger_for_status (logger.tail_logger):
+
+    def status (self):
+        return 'Last %d log entries for: %s' % (
+                len (self.messages),
+                html_repr (self)
+                )
+        
+    def hyper_respond (self, sh, path, request):
+        request['Content-Type'] = 'text/plain'
+        messages = self.messages[:]
+        messages.reverse()
+        request.push (lines_producer (messages))
+        request.done()
+        
+class lines_producer:
+    def __init__ (self, lines):
+        self.lines = lines
+        
+    def ready (self):
+        return len(self.lines)
+        
+    def more (self):
+        if self.lines:
+            chunk = self.lines[:50]
+            self.lines = self.lines[50:]
+            return string.join (chunk, '\r\n') + '\r\n'
+        else:
+            return ''
+            
+class channel_list_producer (lines_producer):
+    def __init__ (self, statusdir):
+        channel_reprs = map (
+                lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
+                asyncore.socket_map.values()
+                )
+        channel_reprs.sort()
+        lines_producer.__init__ (
+                self,
+                ['<h1>Active Channel List</h1>',
+                 '<pre>'
+                 ] + channel_reprs + [
+                         '</pre>',
+                         '<p><a href="%s">Status Report</a>' % statusdir
+                         ]
+                )
+        
+        
+# this really needs a full-blown quoter...
+def sanitize (s):
+    if '<' in s:
+        s = string.join (string.split (s, '<'), '&lt;')
+    if '>' in s:
+        s = string.join (string.split (s, '>'), '&gt;')
+    return s
+    
+def html_repr (object):
+    so = sanitize (repr (object))
+    if hasattr (object, 'hyper_respond'):
+        return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
+    else:
+        return so
+        
+def html_reprs (list, front='', back=''):
+    reprs = map (
+            lambda x,f=front,b=back: '%s%s%s' % (f,x,b),
+            map (lambda x: sanitize (html_repr(x)), list)
+            )
+    reprs.sort()
+    return reprs
+    
+# for example, tera, giga, mega, kilo
+# p_d (n, (1024, 1024, 1024, 1024))
+# smallest divider goes first - for example
+# minutes, hours, days
+# p_d (n, (60, 60, 24))
+    
+def progressive_divide (n, parts):
+    result = []
+    for part in parts:
+        n, rem = divmod (n, part)
+        result.append (rem)
+    result.append (n)
+    return result
+    
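+# For example, 3661 seconds is 1 hour, 1 minute and 1 second:
+#
+#   progressive_divide (3661, (60, 60, 24))  =>  [1, 1, 1, 0]
+#   (secs, mins, hours, days)
+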
+# b,k,m,g,t
+def split_by_units (n, units, dividers, format_string):
+    divs = progressive_divide (n, dividers)
+    result = []
+    for i in range(len(units)):
+        if divs[i]:
+            result.append (format_string % (divs[i], units[i]))
+    result.reverse()
+    if not result:
+        return [format_string % (0, units[0])]
+    else:
+        return result
+        
+def english_bytes (n):
+    return split_by_units (
+            n,
+            ('','K','M','G','T'),
+            (1024, 1024, 1024, 1024, 1024),
+            '%d %sB'
+            )
+    
+def english_time (n):
+    return split_by_units (
+            n,
+            ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
+            (         60,     60,      24,     7,       52),
+            '%d %s'
+            )
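+
+# For example:
+#
+#   english_time (90061)  =>  ['1 days', '1 hours', '1 mins', '1 secs']
+#   english_bytes (1536)  =>  ['1 KB', '512 B']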