[Zope3-checkins] SVN: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ - Added ClientCookie.

Stephan Richter srichter at cosmos.phy.tufts.edu
Thu Oct 6 16:23:17 EDT 2005


Log message for revision 38827:
  - Added ClientCookie.
  
  - Made test non-US savvy. :-) Peh, those ignorant US-based people! :-) 
    Doh! That includes me...
  
  

Changed:
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_BSDDBCookieJar.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ClientCookie.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ConnCache.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Debug.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_HeadersUtil.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_LWPCookieJar.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIECookieJar.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIEDBCookieJar.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MozillaCookieJar.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Opener.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Request.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Util.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/__init__.py
  A   Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_urllib2_support.py
  U   Zope3/branches/testbrowser-integration/src/zope/testbrowser/__init__.py
  U   Zope3/branches/testbrowser-integration/src/zope/testbrowser/over_the_wire.txt

-=-
Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_BSDDBCookieJar.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_BSDDBCookieJar.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_BSDDBCookieJar.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,180 @@
+"""Persistent CookieJar based on bsddb standard library module.
+
+Copyright 2003-2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+**********************************************************************
+THIS IS NOT FULLY TESTED!
+**********************************************************************
+
+"""
+
+from _ClientCookie import CookieJar, MappingIterator
+from _Debug import getLogger
+debug = getLogger("ClientCookie").debug
+
+import bsddb
+import cPickle
+pickle = cPickle
+del cPickle
+
+try: StopIteration
+except NameError:
+    from _ClientCookie import StopIteration
+
+def CreateBSDDBCookieJar(filename, policy=None):
+    """Return a BSDDBCookieJar given a BSDDB filename.
+
+    Use this rather than directly using the BSDDBCookieJar constructor,
+    unless you know what you're doing.
+
+    filename: filename for sleepycat BSDDB database; if the file doesn't exist,
+     it will be created; otherwise, it will be opened
+
+    **********************************************************************
+    BSDDBCookieJar IS NOT FULLY TESTED!
+    **********************************************************************
+
+    """
+    db = bsddb.db.DB()
+    db.open(filename, bsddb.db.DB_HASH, bsddb.db.DB_CREATE, 0666)
+    return BSDDBCookieJar(policy, db)
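+
+# A minimal usage sketch (the filename is hypothetical; as the warning above
+# says, this jar is not fully tested):
+#
+#     jar = CreateBSDDBCookieJar("cookies.db")
+#     # ... hand the jar to an opener, make some requests ...
+#     jar.clear_session_cookies()  # session cookies are stored on disk too
+#     jar.close()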
+
+class BSDDBIterator:
+    # XXXX should this use thread lock?
+    def __init__(self, cursor):
+        iterator = None
+        self._c = cursor
+        self._i = iterator
+    def __iter__(self): return self
+    def close(self):
+        if self._c is not None:
+            self._c.close()
+        self._c = self._i = self.next = self.__iter__ = None
+    def next(self):
+        while 1:
+            if self._i is None:
+                item = self._c.next()
+                if item is None:
+                    self.close()
+                    raise StopIteration()
+                domain, data = item
+                self._i = MappingIterator(pickle.loads(data))
+            try:
+                return self._i.next()
+            except StopIteration:
+                self._i = None
+                continue
+    def __del__(self):
+        # XXXX will this work?
+        self.close()
+
+class BSDDBCookieJar(CookieJar):
+    """CookieJar based on a BSDDB database, using the standard bsddb module.
+
+    You should use CreateBSDDBCookieJar instead of the constructor, unless you
+    know what you're doing.
+
+    Note that session cookies ARE stored in the database (marked as session
+    cookies), and will be written to disk if the database is file-based.  In
+    order to clear session cookies at the end of a session, you must call
+    .clear_session_cookies().
+
+    Call the .close() method after you've finished using an instance of this
+    class.
+
+    **********************************************************************
+    THIS IS NOT FULLY TESTED!
+    **********************************************************************
+
+    """
+    # XXX
+    # use transactions to make multiple reader processes possible
+    def __init__(self, policy=None, db=None):
+        CookieJar.__init__(self, policy)
+        del self._cookies
+        if db is None:
+            db = bsddb.db.DB()
+        self._db = db
+    def close(self):
+        self._db.close()
+    def __del__(self):
+        # XXXX will this work?
+        self.close()
+    def clear(self, domain=None, path=None, name=None):
+        if name is not None:
+            if (domain is None) or (path is None):
+                raise ValueError(
+                    "domain and path must be given to remove a cookie by name")
+        elif path is not None:
+            if domain is None:
+                raise ValueError(
+                    "domain must be given to remove cookies by path")
+
+        db = self._db
+        self._cookies_lock.acquire()
+        try:
+            if domain is not None:
+                data = db.get(domain)
+                if data is not None:
+                    if path is name is None:
+                        db.delete(domain)
+                    else:
+                        c2 = pickle.loads(data)
+                        if name is None:
+                            del c2[path]
+                        else:
+                            del c2[path][name]
+                        # write the modified mapping back to the database
+                        db.put(domain, pickle.dumps(c2))
+                else:
+                    raise KeyError("no domain '%s'" % domain)
+        finally:
+            self._cookies_lock.release()
+    def set_cookie(self, cookie):
+        db = self._db
+        self._cookies_lock.acquire()
+        try:
+            # store 2-level dict under domain, like {path: {name: value}}
+            data = db.get(cookie.domain)
+            if data is None:
+                c2 = {}
+            else:
+                c2 = pickle.loads(data)
+            if not c2.has_key(cookie.path): c2[cookie.path] = {}
+            c3 = c2[cookie.path]
+            c3[cookie.name] = cookie
+            db.put(cookie.domain, pickle.dumps(c2))
+        finally:
+            self._cookies_lock.release()
+    def __iter__(self):
+        return BSDDBIterator(self._db.cursor())
+    def _cookies_for_request(self, request):
+        """Return a list of cookies to be returned to server."""
+        cookies = []
+        for domain in self._db.keys():
+            cookies.extend(self._cookies_for_domain(domain, request))
+        return cookies
+    def _cookies_for_domain(self, domain, request):
+        debug("Checking %s for cookies to return", domain)
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+
+        data = self._db.get(domain)
+        if data is None:
+            return []
+        cookies_by_path = pickle.loads(data)
+
+        cookies = []
+        for path in cookies_by_path.keys():
+            if not self._policy.path_return_ok(path, request):
+                continue
+            for name, cookie in cookies_by_path[path].items():
+                if not self._policy.return_ok(cookie, request):
+                    debug("   not returning cookie")
+                    continue
+                debug("   it's a match")
+                cookies.append(cookie)
+
+        return cookies


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_BSDDBCookieJar.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ClientCookie.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ClientCookie.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ClientCookie.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,1687 @@
+"""HTTP cookie handling for web clients, plus some other stuff.
+
+This module originally developed from my port of Gisle Aas' Perl module
+HTTP::Cookies, from the libwww-perl library.
+
+Docstrings, comments and debug strings in this code refer to the
+attributes of the HTTP cookie system as cookie-attributes, to distinguish
+them clearly from Python attributes.
+
+                        CookieJar____
+                        /     \      \
+            FileCookieJar      \      \
+             /    |   \         \      \
+ MozillaCookieJar | LWPCookieJar \      \
+                  |               |      \
+                  |   ---MSIEBase |       \
+                  |  /      |     |        \
+                  | /   MSIEDBCookieJar BSDDBCookieJar
+                  |/    
+               MSIECookieJar
+
+Comments to John J Lee <jjl at pobox.com>.
+
+
+Copyright 2002-2005 John J Lee <jjl at pobox.com>
+Copyright 1997-1999 Gisle Aas (original libwww-perl code)
+Copyright 2002-2003 Johnny Lee (original MSIE Perl code)
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+VERSION = "1.0.3"
+
+
+# Public health warning: anyone who thought 'cookies are simple, aren't they?',
+# run away now :-(
+
+import sys, re, urlparse, string, copy, time, struct, urllib, types
+try:
+    import threading
+    _threading = threading; del threading
+except ImportError:
+    import dummy_threading
+    _threading = dummy_threading; del dummy_threading
+import httplib  # only for the default HTTP port
+
+MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
+                         "instance initialised with one)")
+DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+try:
+    from types import UnicodeType
+except ImportError:
+    UNICODE = False
+else:
+    UNICODE = True
+
+try: StopIteration
+except NameError:
+    class StopIteration(Exception): pass
+
+import ClientCookie
+from _HeadersUtil import split_header_words, parse_ns_headers
+from _Util import startswith, endswith, isstringlike, getheaders
+from _Debug import warn, getLogger
+debug = getLogger("ClientCookie").debug
+
+try: bool
+except NameError:
+    def bool(expr):
+        if expr: return True
+        else: return False
+
+try: issubclass(Exception, (Exception,))
+except TypeError:
+    real_issubclass = issubclass
+    from _Util import compat_issubclass
+    issubclass = compat_issubclass
+    del compat_issubclass
+
+def reraise_unmasked_exceptions(unmasked=()):
+    # There are a few catch-all except: statements in this module, for
+    # catching input that's bad in unexpected ways.
+    # This function re-raises some exceptions we don't want to trap.
+    if not ClientCookie.USE_BARE_EXCEPT:
+        raise
+    unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError)
+    etype = sys.exc_info()[0]
+    if issubclass(etype, unmasked):
+        raise
+    # swallowed an exception
+    import traceback, StringIO
+    f = StringIO.StringIO()
+    traceback.print_exc(None, f)
+    msg = f.getvalue()
+    warn("ClientCookie bug!\n%s" % msg)
+
+
+IPV4_RE = re.compile(r"\.\d+$")
+def is_HDN(text):
+    """Return True if text is a host domain name."""
+    # XXX
+    # This may well be wrong.  Which RFC is HDN defined in, if any (for
+    #  the purposes of RFC 2965)?
+    # For the current implementation, what about IPv6?  Remember to look
+    #  at other uses of IPV4_RE also, if change this.
+    return not (IPV4_RE.search(text) or
+                text == "" or
+                text[0] == "." or text[-1] == ".")
+
+def domain_match(A, B):
+    """Return True if domain A domain-matches domain B, according to RFC 2965.
+
+    A and B may be host domain names or IP addresses.
+
+    RFC 2965, section 1:
+
+    Host names can be specified either as an IP address or a HDN string.
+    Sometimes we compare one host name with another.  (Such comparisons SHALL
+    be case-insensitive.)  Host A's name domain-matches host B's if
+
+         *  their host name strings string-compare equal; or
+
+         * A is a HDN string and has the form NB, where N is a non-empty
+            name string, B has the form .B', and B' is a HDN string.  (So,
+            x.y.com domain-matches .Y.com but not Y.com.)
+
+    Note that domain-match is not a commutative operation: a.b.c.com
+    domain-matches .c.com, but not the reverse.
+
+    """
+    # Note that, if A or B are IP addresses, the only relevant part of the
+    # definition of the domain-match algorithm is the direct string-compare.
+    A = string.lower(A)
+    B = string.lower(B)
+    if A == B:
+        return True
+    if not is_HDN(A):
+        return False
+    i = string.rfind(A, B)
+    has_form_nb = not (i == -1 or i == 0)
+    return (
+        has_form_nb and
+        startswith(B, ".") and
+        is_HDN(B[1:])
+        )
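+
+# Illustrative checks, matching the docstring above:
+#
+#     >>> domain_match("x.y.com", ".y.com")
+#     True
+#     >>> domain_match("y.com", ".y.com")
+#     False
+#     >>> domain_match("x.y.com", "y.com")
+#     False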
+
+def liberal_is_HDN(text):
+    """Return True if text is sort-of like a host domain name.
+
+    For accepting/blocking domains.
+
+    """
+    return not IPV4_RE.search(text)
+
+def user_domain_match(A, B):
+    """For blocking/accepting domains.
+
+    A and B may be host domain names or IP addresses.
+
+    """
+    A = string.lower(A)
+    B = string.lower(B)
+    if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
+        if A == B:
+            # equal IP addresses
+            return True
+        return False
+    initial_dot = startswith(B, ".")
+    if initial_dot and endswith(A, B):
+        return True
+    if not initial_dot and A == B:
+        return True
+    return False
+
+cut_port_re = re.compile(r":\d+$")
+def request_host(request):
+    """Return request-host, as defined by RFC 2965.
+
+    Variation from RFC: returned value is lowercased, for convenient
+    comparison.
+
+    """
+    url = request.get_full_url()
+    host = urlparse.urlparse(url)[1]
+    if host == "":
+        host = request.get_header("Host", "")
+
+    # remove port, if present
+    host = cut_port_re.sub("", host, 1)
+    return string.lower(host)
+
+def eff_request_host(request):
+    """Return a tuple (request-host, effective request-host name).
+
+    As defined by RFC 2965, except both are lowercased.
+
+    """
+    erhn = req_host = request_host(request)
+    if string.find(req_host, ".") == -1 and not IPV4_RE.search(req_host):
+        erhn = req_host + ".local"
+    return req_host, erhn
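+
+# A sketch of these helpers, assuming a urllib2.Request is used:
+#
+#     >>> import urllib2
+#     >>> req = urllib2.Request("http://www.acme.com/rocket")
+#     >>> request_host(req)
+#     'www.acme.com'
+#     >>> eff_request_host(req)
+#     ('www.acme.com', 'www.acme.com')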
+
+def request_path(request):
+    """request-URI, as defined by RFC 2965."""
+    url = request.get_full_url()
+    #scheme, netloc, path, parameters, query, frag = urlparse.urlparse(url)
+    #req_path = escape_path(string.join(urlparse.urlparse(url)[2:], ""))
+    path, parameters, query, frag = urlparse.urlparse(url)[2:]
+    if parameters:
+        path = "%s;%s" % (path, parameters)
+    path = escape_path(path)
+    req_path = urlparse.urlunparse(("", "", path, "", query, frag))
+    if not startswith(req_path, "/"):
+        # fix bad RFC 2396 absoluteURI
+        req_path = "/"+req_path
+    return req_path
+
+def request_port(request):
+    host = request.get_host()
+    i = string.find(host, ':')
+    if i >= 0:
+        port = host[i+1:]
+        try:
+            int(port)
+        except ValueError:
+            debug("nonnumeric port: '%s'", port)
+            return None
+    else:
+        port = DEFAULT_HTTP_PORT
+    return port
+
+# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
+# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
+HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
+ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
+def uppercase_escaped_char(match):
+    return "%%%s" % string.upper(match.group(1))
+def escape_path(path):
+    """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
+    # There's no knowing what character encoding was used to create URLs
+    # containing %-escapes, but since we have to pick one to escape invalid
+    # path characters, we pick UTF-8, as recommended in the HTML 4.0
+    # specification:
+    # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
+    # And here, kind of: draft-fielding-uri-rfc2396bis-03
+    # (And in draft IRI specification: draft-duerst-iri-05)
+    # (And here, for new URI schemes: RFC 2718)
+    if UNICODE and isinstance(path, types.UnicodeType):
+        path = path.encode("utf-8")
+    path = urllib.quote(path, HTTP_PATH_SAFE)
+    path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
+    return path
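+
+# For example, a space is escaped and an existing escape is uppercased:
+#
+#     >>> escape_path("/foo bar/%7ejoe")
+#     '/foo%20bar/%7Ejoe'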
+
+def reach(h):
+    """Return reach of host h, as defined by RFC 2965, section 1.
+
+    The reach R of a host name H is defined as follows:
+
+       *  If
+
+          -  H is the host domain name of a host; and,
+
+          -  H has the form A.B; and
+
+          -  A has no embedded (that is, interior) dots; and
+
+          -  B has at least one embedded dot, or B is the string "local".
+             then the reach of H is .B.
+
+       *  Otherwise, the reach of H is H.
+
+    >>> reach("www.acme.com")
+    '.acme.com'
+    >>> reach("acme.com")
+    'acme.com'
+    >>> reach("acme.local")
+    '.local'
+
+    """
+    i = string.find(h, ".")
+    if i >= 0:
+        #a = h[:i]  # this line is only here to show what a is
+        b = h[i+1:]
+        i = string.find(b, ".")
+        if is_HDN(h) and (i >= 0 or b == "local"):
+            return "."+b
+    return h
+
+def is_third_party(request):
+    """
+
+    RFC 2965, section 3.3.6:
+
+        An unverifiable transaction is to a third-party host if its request-
+        host U does not domain-match the reach R of the request-host O in the
+        origin transaction.
+
+    """
+    req_host = request_host(request)
+    # the origin request's request-host was stuffed into request by
+    # _urllib2_support.AbstractHTTPHandler
+    return not domain_match(req_host, reach(request.origin_req_host))
+
+
+class Cookie:
+    """HTTP Cookie.
+
+    This class represents both Netscape and RFC 2965 cookies.
+
+    This is deliberately a very simple class.  It just holds attributes.  It's
+    possible to construct Cookie instances that don't comply with the cookie
+    standards.  CookieJar.make_cookies is the factory function for Cookie
+    objects -- it deals with cookie parsing, supplying defaults, and
+    normalising to the representation used in this class.  CookiePolicy is
+    responsible for checking them to see whether they should be accepted from
+    and returned to the server.
+
+    version: integer;
+    name: string;
+    value: string (may be None);
+    port: string; None indicates no attribute was supplied (eg. "Port", rather
+     than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list
+     string (eg. "80,8080")
+    port_specified: boolean; true if a value was supplied with the Port
+     cookie-attribute
+    domain: string;
+    domain_specified: boolean; true if Domain was explicitly set
+    domain_initial_dot: boolean; true if Domain as set in HTTP header by server
+     started with a dot (yes, this really is necessary!)
+    path: string;
+    path_specified: boolean; true if Path was explicitly set
+    secure:  boolean; true if should only be returned over secure connection
+    expires: integer; seconds since epoch (RFC 2965 cookies should calculate
+     this value from the Max-Age attribute)
+    discard: boolean, true if this is a session cookie; (if no expires value,
+     this should be true)
+    comment: string;
+    comment_url: string;
+    rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not
+     Set-Cookie2:) header, but had a version cookie-attribute of 1
+    rest: mapping of other cookie-attributes
+
+    Note that the port may be present in the headers, but unspecified ("Port"
+    rather than "Port=80", for example); if this is the case, port is None.
+
+    """
+
+    def __init__(self, version, name, value,
+                 port, port_specified,
+                 domain, domain_specified, domain_initial_dot,
+                 path, path_specified,
+                 secure,
+                 expires,
+                 discard,
+                 comment,
+                 comment_url,
+                 rest,
+                 rfc2109=False,
+                 ):
+
+        if version is not None: version = int(version)
+        if expires is not None: expires = int(expires)
+        if port is None and port_specified is True:
+            raise ValueError("if port is None, port_specified must be false")
+
+        self.version = version
+        self.name = name
+        self.value = value
+        self.port = port
+        self.port_specified = port_specified
+        # normalise case, as per RFC 2965 section 3.3.3
+        self.domain = string.lower(domain)
+        self.domain_specified = domain_specified
+        # Sigh.  We need to know whether the domain given in the
+        # cookie-attribute had an initial dot, in order to follow RFC 2965
+        # (as clarified in draft errata).  Needed for the returned $Domain
+        # value.
+        self.domain_initial_dot = domain_initial_dot
+        self.path = path
+        self.path_specified = path_specified
+        self.secure = secure
+        self.expires = expires
+        self.discard = discard
+        self.comment = comment
+        self.comment_url = comment_url
+        self.rfc2109 = rfc2109
+
+        self._rest = copy.copy(rest)
+
+    def has_nonstandard_attr(self, name):
+        return self._rest.has_key(name)
+    def get_nonstandard_attr(self, name, default=None):
+        return self._rest.get(name, default)
+    def set_nonstandard_attr(self, name, value):
+        self._rest[name] = value
+    def nonstandard_attr_keys(self):
+        return self._rest.keys()
+
+    def is_expired(self, now=None):
+        if now is None: now = time.time()
+        return (self.expires is not None) and (self.expires <= now)
+
+    def __str__(self):
+        if self.port is None: p = ""
+        else: p = ":"+self.port
+        limit = self.domain + p + self.path
+        if self.value is not None:
+            namevalue = "%s=%s" % (self.name, self.value)
+        else:
+            namevalue = self.name
+        return "<Cookie %s for %s>" % (namevalue, limit)
+
+    def __repr__(self):
+        args = []
+        for name in ["version", "name", "value",
+                     "port", "port_specified",
+                     "domain", "domain_specified", "domain_initial_dot",
+                     "path", "path_specified",
+                     "secure", "expires", "discard", "comment", "comment_url",
+                     ]:
+            attr = getattr(self, name)
+            args.append("%s=%s" % (name, repr(attr)))
+        args.append("rest=%s" % repr(self._rest))
+        args.append("rfc2109=%s" % repr(self.rfc2109))
+        return "Cookie(%s)" % string.join(args, ", ")
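+
+# A minimal hand-built session cookie (the values are illustrative; normally
+# CookieJar.make_cookies builds Cookie objects from response headers):
+#
+#     c = Cookie(0, "foo", "bar", None, False,
+#                "www.example.com", False, False,
+#                "/", False, False, None, True,
+#                None, None, {})
+#     str(c)  # '<Cookie foo=bar for www.example.com/>'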
+
+
+class CookiePolicy:
+    """Defines which cookies get accepted from and returned to server.
+
+    May also modify cookies.
+
+    The subclass DefaultCookiePolicy defines the standard rules for Netscape
+    and RFC 2965 cookies -- override that if you want a customised policy.
+
+    As well as implementing set_ok and return_ok, implementations of this
+    interface must also supply the following attributes, indicating which
+    protocols should be used, and how.  These can be read and set at any time,
+    though whether that makes complete sense from the protocol point of view is
+    doubtful.
+
+    Public attributes:
+
+    netscape: implement netscape protocol
+    rfc2965: implement RFC 2965 protocol
+    rfc2109_as_netscape:
+       WARNING: This argument will change or go away if it is not accepted into
+                the Python standard library in this form!
+     If true, treat RFC 2109 cookies as though they were Netscape cookies.  The
+     default is for this attribute to be None, which means treat 2109 cookies
+     as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is,
+     by default), and as Netscape cookies otherwise.
+    hide_cookie2: don't add Cookie2 header to requests (the presence of
+     this header indicates to the server that we understand RFC 2965
+     cookies)
+
+    """
+    def set_ok(self, cookie, request):
+        """Return true if (and only if) cookie should be accepted from server.
+
+        Currently, pre-expired cookies never get this far -- the CookieJar
+        class deletes such cookies itself.
+
+        cookie: ClientCookie.Cookie object
+        request: object implementing the interface defined by
+         CookieJar.extract_cookies.__doc__
+
+        """
+        raise NotImplementedError()
+
+    def return_ok(self, cookie, request):
+        """Return true if (and only if) cookie should be returned to server.
+
+        cookie: ClientCookie.Cookie object
+        request: object implementing the interface defined by
+         CookieJar.add_cookie_header.__doc__
+
+        """
+        raise NotImplementedError()
+
+    def domain_return_ok(self, domain, request):
+        """Return false if cookies should not be returned, given cookie domain.
+
+        This is here as an optimization, to remove the need for checking every
+        cookie with a particular domain (which may involve reading many files).
+        The default implementations of domain_return_ok and path_return_ok
+        (return True) leave all the work to return_ok.
+
+        If domain_return_ok returns true for the cookie domain, path_return_ok
+        is called for the cookie path.  Otherwise, path_return_ok and return_ok
+        are never called for that cookie domain.  If path_return_ok returns
+        true, return_ok is called with the Cookie object itself for a full
+        check.  Otherwise, return_ok is never called for that cookie path.
+
+        Note that domain_return_ok is called for every *cookie* domain, not
+        just for the *request* domain.  For example, the function might be
+        called with both ".acme.com" and "www.acme.com" if the request domain is
+        "www.acme.com".  The same goes for path_return_ok.
+
+        For argument documentation, see the docstring for return_ok.
+
+        """
+        return True
+
+    def path_return_ok(self, path, request):
+        """Return false if cookies should not be returned, given cookie path.
+
+        See the docstring for domain_return_ok.
+
+        """
+        return True
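+
+# A sketch of the call ordering described above: a hypothetical policy that
+# rejects foreign domains cheaply, so path_return_ok and return_ok are never
+# called for them (DefaultCookiePolicy is defined below):
+#
+#     class SingleSitePolicy(DefaultCookiePolicy):
+#         def domain_return_ok(self, domain, request):
+#             if not domain_match("www.example.com", domain):
+#                 return False
+#             return DefaultCookiePolicy.domain_return_ok(
+#                 self, domain, request)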
+
+
+class DefaultCookiePolicy(CookiePolicy):
+    """Implements the standard rules for accepting and returning cookies.
+
+    Both RFC 2965 and Netscape cookies are covered.  RFC 2965 handling is
+    switched off by default.
+
+    The easiest way to provide your own policy is to override this class and
+    call its methods in your overridden implementations before adding your own
+    additional checks.
+
+    import ClientCookie
+    class MyCookiePolicy(ClientCookie.DefaultCookiePolicy):
+        def set_ok(self, cookie, request):
+            if not ClientCookie.DefaultCookiePolicy.set_ok(
+                self, cookie, request):
+                return False
+            if i_dont_want_to_store_this_cookie():
+                return False
+            return True
+
+    In addition to the features required to implement the CookiePolicy
+    interface, this class allows you to block and allow domains from setting
+    and receiving cookies.  There are also some strictness switches that allow
+    you to tighten up the rather loose Netscape protocol rules a little bit (at
+    the cost of blocking some benign cookies).
+
+    A domain blacklist and whitelist are provided (both off by default).  Only
+    domains not in the blacklist and present in the whitelist (if the whitelist
+    is active) participate in cookie setting and returning.  Use the
+    blocked_domains constructor argument, and blocked_domains and
+    set_blocked_domains methods (and the corresponding argument and methods for
+    allowed_domains).  If you set a whitelist, you can turn it off again by
+    setting it to None.
+
+    Domains in block or allow lists that do not start with a dot must
+    string-compare equal.  For example, "acme.com" matches a blacklist entry of
+    "acme.com", but "www.acme.com" does not.  Domains that do start with a dot
+    are matched by more specific domains too.  For example, both "www.acme.com"
+    and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
+    not).  IP addresses are an exception, and must match exactly.  For example,
+    if blocked_domains contains "192.168.1.2" and ".168.1.2", then 192.168.1.2 is
+    blocked, but 193.168.1.2 is not.
+
+    Additional Public Attributes:
+
+    General strictness switches
+
+    strict_domain: don't allow sites to set two-component domains with
+     country-code top-level domains like .co.uk, .gov.uk, .co.nz, etc.
+     This is far from perfect and isn't guaranteed to work!
+
+    RFC 2965 protocol strictness switches
+
+    strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
+     transactions (usually, an unverifiable transaction is one resulting from
+     a redirect or an image hosted on another site); if this is false, cookies
+     are NEVER blocked on the basis of verifiability
+
+    Netscape protocol strictness switches
+
+    strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
+     even to Netscape cookies
+    strict_ns_domain: flags indicating how strict to be with domain-matching
+     rules for Netscape cookies:
+      DomainStrictNoDots: when setting cookies, host prefix must not contain a
+       dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
+       www.foo contains a dot)
+      DomainStrictNonDomain: cookies that did not explicitly specify a Domain
+       cookie-attribute can only be returned to a domain that string-compares
+       equal to the domain that set the cookie (eg. rockets.acme.com won't
+       be returned cookies from acme.com that had no Domain cookie-attribute)
+      DomainRFC2965Match: when setting cookies, require a full RFC 2965
+       domain-match
+      DomainLiberal and DomainStrict are the most useful combinations of the
+       above flags, for convenience
+    strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
+     have names starting with '$'
+    strict_ns_set_path: don't allow setting cookies whose path doesn't
+     path-match request URI
+
+    """
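+
+    # A block-list sketch (the domains are illustrative):
+    #
+    #     policy = DefaultCookiePolicy(
+    #         blocked_domains=[".ads.example.net", "192.168.1.2"])
+    #     jar = CookieJar(policy)
+    #     # cookies are now neither accepted from nor returned to
+    #     # *.ads.example.net or the IP address 192.168.1.2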
+
+    DomainStrictNoDots = 1
+    DomainStrictNonDomain = 2
+    DomainRFC2965Match = 4
+
+    DomainLiberal = 0
+    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
+
+    def __init__(self,
+                 blocked_domains=None, allowed_domains=None,
+                 netscape=True, rfc2965=False,
+                 # WARNING: this argument will change or go away if it is not
+                 # accepted into the Python standard library in this form!
+                 # default, ie. treat 2109 as netscape iff not rfc2965
+                 rfc2109_as_netscape=None,
+                 hide_cookie2=False,
+                 strict_domain=False,
+                 strict_rfc2965_unverifiable=True,
+                 strict_ns_unverifiable=False,
+                 strict_ns_domain=DomainLiberal,
+                 strict_ns_set_initial_dollar=False,
+                 strict_ns_set_path=False,
+                 ):
+        """
+        Constructor arguments should be used as keyword arguments only.
+
+        blocked_domains: sequence of domain names that we never accept cookies
+         from, nor return cookies to
+        allowed_domains: if not None, this is a sequence of the only domains
+         for which we accept and return cookies
+
+        For other arguments, see CookiePolicy.__doc__ and
+        DefaultCookiePolicy.__doc__.
+
+        """
+        self.netscape = netscape
+        self.rfc2965 = rfc2965
+        self.rfc2109_as_netscape = rfc2109_as_netscape
+        self.hide_cookie2 = hide_cookie2
+        self.strict_domain = strict_domain
+        self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
+        self.strict_ns_unverifiable = strict_ns_unverifiable
+        self.strict_ns_domain = strict_ns_domain
+        self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
+        self.strict_ns_set_path = strict_ns_set_path
+
+        if blocked_domains is not None:
+            self._blocked_domains = tuple(blocked_domains)
+        else:
+            self._blocked_domains = ()
+
+        if allowed_domains is not None:
+            allowed_domains = tuple(allowed_domains)
+        self._allowed_domains = allowed_domains
+
+    def blocked_domains(self):
+        """Return the sequence of blocked domains (as a tuple)."""
+        return self._blocked_domains
+    def set_blocked_domains(self, blocked_domains):
+        """Set the sequence of blocked domains."""
+        self._blocked_domains = tuple(blocked_domains)
+
+    def is_blocked(self, domain):
+        for blocked_domain in self._blocked_domains:
+            if user_domain_match(domain, blocked_domain):
+                return True
+        return False
+
+    def allowed_domains(self):
+        """Return None, or the sequence of allowed domains (as a tuple)."""
+        return self._allowed_domains
+    def set_allowed_domains(self, allowed_domains):
+        """Set the sequence of allowed domains, or None."""
+        if allowed_domains is not None:
+            allowed_domains = tuple(allowed_domains)
+        self._allowed_domains = allowed_domains
+
+    def is_not_allowed(self, domain):
+        if self._allowed_domains is None:
+            return False
+        for allowed_domain in self._allowed_domains:
+            if user_domain_match(domain, allowed_domain):
+                return False
+        return True
+
+    def set_ok(self, cookie, request):
+        """
+        If you override set_ok, be sure to call this method.  If it returns
+        false, so should your subclass (assuming your subclass wants to be more
+        strict about which cookies to accept).
+
+        """
+        debug(" - checking cookie %s", cookie)
+
+        assert cookie.name is not None
+
+        for n in "version", "verifiability", "name", "path", "domain", "port":
+            fn_name = "set_ok_"+n
+            fn = getattr(self, fn_name)
+            if not fn(cookie, request):
+                return False
+
+        return True
+
+    def set_ok_version(self, cookie, request):
+        if cookie.version is None:
+            # Version is always set to 0 by parse_ns_headers if it's a Netscape
+            # cookie, so this must be an invalid RFC 2965 cookie.
+            debug("   Set-Cookie2 without version attribute (%s)", cookie)
+            return False
+        if cookie.version > 0 and not self.rfc2965:
+            debug("   RFC 2965 cookies are switched off")
+            return False
+        elif cookie.version == 0 and not self.netscape:
+            debug("   Netscape cookies are switched off")
+            return False
+        return True
+
+    def set_ok_verifiability(self, cookie, request):
+        if request.unverifiable and is_third_party(request):
+            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+                debug("   third-party RFC 2965 cookie during "
+                             "unverifiable transaction")
+                return False
+            elif cookie.version == 0 and self.strict_ns_unverifiable:
+                debug("   third-party Netscape cookie during "
+                             "unverifiable transaction")
+                return False
+        return True
+
+    def set_ok_name(self, cookie, request):
+        # Try and stop servers setting V0 cookies designed to hack other
+        # servers that know both V0 and V1 protocols.
+        if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
+            startswith(cookie.name, "$")):
+            debug("   illegal name (starts with '$'): '%s'", cookie.name)
+            return False
+        return True
+
+    def set_ok_path(self, cookie, request):
+        if cookie.path_specified:
+            req_path = request_path(request)
+            if ((cookie.version > 0 or
+                 (cookie.version == 0 and self.strict_ns_set_path)) and
+                not startswith(req_path, cookie.path)):
+                debug("   path attribute %s is not a prefix of request "
+                      "path %s", cookie.path, req_path)
+                return False
+        return True
+
+    def set_ok_countrycode_domain(self, cookie, request):
+        """Return False if explicit cookie domain is not acceptable.
+
+        Called by set_ok_domain, for convenience of overriding by
+        subclasses.
+
+        """
+        if cookie.domain_specified and self.strict_domain:
+            domain = cookie.domain
+            # since domain was specified, we know that:
+            assert domain.startswith(".")
+            if string.count(domain, ".") == 2:
+                # domain like .foo.bar
+                i = string.rfind(domain, ".")
+                tld = domain[i+1:]
+                sld = domain[1:i]
+                if (string.lower(sld) in [
+                    "co", "ac",
+                    "com", "edu", "org", "net", "gov", "mil", "int"] and
+                    len(tld) == 2):
+                    # domain like .co.uk
+                    return False
+        return True
+
+    def set_ok_domain(self, cookie, request):
+        if self.is_blocked(cookie.domain):
+            debug("   domain %s is in user block-list", cookie.domain)
+            return False
+        if self.is_not_allowed(cookie.domain):
+            debug("   domain %s is not in user allow-list", cookie.domain)
+            return False
+        if not self.set_ok_countrycode_domain(cookie, request):
+            debug("   country-code second level domain %s", cookie.domain)
+            return False
+        if cookie.domain_specified:
+            req_host, erhn = eff_request_host(request)
+            domain = cookie.domain
+            if startswith(domain, "."):
+                undotted_domain = domain[1:]
+            else:
+                undotted_domain = domain
+            embedded_dots = (string.find(undotted_domain, ".") >= 0)
+            if not embedded_dots and domain != ".local":
+                debug("   non-local domain %s contains no embedded dot",
+                      domain)
+                return False
+            if cookie.version == 0:
+                if (not endswith(erhn, domain) and
+                    (not startswith(erhn, ".") and
+                     not endswith("."+erhn, domain))):
+                    debug("   effective request-host %s (even with added "
+                          "initial dot) does not end with %s",
+                          erhn, domain)
+                    return False
+            if (cookie.version > 0 or
+                (self.strict_ns_domain & self.DomainRFC2965Match)):
+                if not domain_match(erhn, domain):
+                    debug("   effective request-host %s does not domain-match "
+                          "%s", erhn, domain)
+                    return False
+            if (cookie.version > 0 or
+                (self.strict_ns_domain & self.DomainStrictNoDots)):
+                host_prefix = req_host[:-len(domain)]
+                if (string.find(host_prefix, ".") >= 0 and
+                    not IPV4_RE.search(req_host)):
+                    debug("   host prefix %s for domain %s contains a dot",
+                          host_prefix, domain)
+                    return False
+        return True
+
+    def set_ok_port(self, cookie, request):
+        if cookie.port_specified:
+            req_port = request_port(request)
+            if req_port is None:
+                req_port = "80"
+            else:
+                req_port = str(req_port)
+            for p in string.split(cookie.port, ","):
+                try:
+                    int(p)
+                except ValueError:
+                    debug("   bad port %s (not numeric)", p)
+                    return False
+                if p == req_port:
+                    break
+            else:
+                debug("   request port (%s) not found in %s",
+                      req_port, cookie.port)
+                return False
+        return True
+
+    def return_ok(self, cookie, request):
+        """
+        If you override return_ok, be sure to call this method.  If it returns
+        false, so should your subclass (assuming your subclass wants to be more
+        strict about which cookies to return).
+
+        """
+        # Path has already been checked by path_return_ok, and domain blocking
+        # done by domain_return_ok.
+        debug(" - checking cookie %s", cookie)
+
+        for n in "version", "verifiability", "secure", "expires", "port", "domain":
+            fn_name = "return_ok_"+n
+            fn = getattr(self, fn_name)
+            if not fn(cookie, request):
+                return False
+        return True
+
+    def return_ok_version(self, cookie, request):
+        if cookie.version > 0 and not self.rfc2965:
+            debug("   RFC 2965 cookies are switched off")
+            return False
+        elif cookie.version == 0 and not self.netscape:
+            debug("   Netscape cookies are switched off")
+            return False
+        return True
+
+    def return_ok_verifiability(self, cookie, request):
+        if request.unverifiable and is_third_party(request):
+            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+                debug("   third-party RFC 2965 cookie during unverifiable "
+                      "transaction")
+                return False
+            elif cookie.version == 0 and self.strict_ns_unverifiable:
+                debug("   third-party Netscape cookie during unverifiable "
+                      "transaction")
+                return False
+        return True
+
+    def return_ok_secure(self, cookie, request):
+        if cookie.secure and request.get_type() != "https":
+            debug("   secure cookie with non-secure request")
+            return False
+        return True
+
+    def return_ok_expires(self, cookie, request):
+        if cookie.is_expired(self._now):
+            debug("   cookie expired")
+            return False
+        return True
+
+    def return_ok_port(self, cookie, request):
+        if cookie.port:
+            req_port = request_port(request)
+            if req_port is None:
+                req_port = "80"
+            for p in string.split(cookie.port, ","):
+                if p == req_port:
+                    break
+            else:
+                debug("   request port %s does not match cookie port %s",
+                      req_port, cookie.port)
+                return False
+        return True
+
+    def return_ok_domain(self, cookie, request):
+        req_host, erhn = eff_request_host(request)
+        domain = cookie.domain
+
+        # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
+        if (cookie.version == 0 and
+            (self.strict_ns_domain & self.DomainStrictNonDomain) and
+            not cookie.domain_specified and domain != erhn):
+            debug("   cookie with unspecified domain does not string-compare "
+                  "equal to request domain")
+            return False
+
+        if cookie.version > 0 and not domain_match(erhn, domain):
+            debug("   effective request-host name %s does not domain-match "
+                  "RFC 2965 cookie domain %s", erhn, domain)
+            return False
+        if cookie.version == 0 and not endswith("."+erhn, domain):
+            debug("   request-host %s does not match Netscape cookie domain "
+                  "%s", req_host, domain)
+            return False
+        return True
+
+    def domain_return_ok(self, domain, request):
+        # Liberal check of domain.  This is here as an optimization to avoid
+        # having to load lots of MSIE cookie files unless necessary.
+
+        # Munge req_host and erhn to always start with a dot, so as to err on
+        # the side of letting cookies through.
+        dotted_req_host, dotted_erhn = eff_request_host(request)
+        if not startswith(dotted_req_host, "."):
+            dotted_req_host = "."+dotted_req_host
+        if not startswith(dotted_erhn, "."):
+            dotted_erhn = "."+dotted_erhn
+        if not (endswith(dotted_req_host, domain) or
+                endswith(dotted_erhn, domain)):
+            #debug("   request domain %s does not match cookie domain %s",
+            #      req_host, domain)
+            return False
+
+        if self.is_blocked(domain):
+            debug("   domain %s is in user block-list", domain)
+            return False
+        if self.is_not_allowed(domain):
+            debug("   domain %s is not in user allow-list", domain)
+            return False
+
+        return True
+
+    def path_return_ok(self, path, request):
+        debug("- checking cookie path=%s", path)
+        req_path = request_path(request)
+        if not startswith(req_path, path):
+            debug("  %s does not path-match %s", req_path, path)
+            return False
+        return True
+
+
+def vals_sorted_by_key(adict):
+    keys = adict.keys()
+    keys.sort()
+    return map(adict.get, keys)
+
+class MappingIterator:
+    """Iterates over nested mapping, depth-first, in sorted order by key."""
+    def __init__(self, mapping):
+        self._s = [(vals_sorted_by_key(mapping), 0, None)]  # LIFO stack
+
+    def __iter__(self): return self
+
+    def next(self):
+        # this is hairy because of lack of generators
+        while 1:
+            try:
+                vals, i, prev_item = self._s.pop()
+            except IndexError:
+                raise StopIteration()
+            if i < len(vals):
+                item = vals[i]
+                i = i + 1
+                self._s.append((vals, i, prev_item))
+                try:
+                    item.items
+                except AttributeError:
+                    # non-mapping
+                    break
+                else:
+                    # mapping
+                    self._s.append((vals_sorted_by_key(item), 0, item))
+                    continue
+        return item
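+
+# For example, nested mappings are flattened depth-first, sorted by key:
+#
+#     >>> list(MappingIterator({"a": {"x": 1}, "b": 2}))
+#     [1, 2]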
+
+
+# Used as second parameter to dict.get method, to distinguish absent
+# dict key from one with a None value.
+class Absent: pass
+
+class CookieJar:
+    """Collection of HTTP cookies.
+
+    You may not need to know about this class: try ClientCookie.urlopen().
+
+    The major methods are extract_cookies and add_cookie_header; these are all
+    you are likely to need.
+
+    CookieJar supports the iterator protocol:
+
+    for cookie in cookiejar:
+        # do something with cookie
+
+    Methods:
+
+    add_cookie_header(request)
+    extract_cookies(response, request)
+    make_cookies(response, request)
+    set_cookie_if_ok(cookie, request)
+    set_cookie(cookie)
+    clear_session_cookies()
+    clear_expired_cookies()
+    clear(domain=None, path=None, name=None)
+
+    Public attributes
+
+    policy: CookiePolicy object
+
+    """
+
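+    # A typical round-trip, assuming urllib2 is used (the request names are
+    # illustrative; ClientCookie.urlopen wires this up for you):
+    #
+    #     jar = CookieJar()
+    #     response = urllib2.urlopen(request)
+    #     jar.extract_cookies(response, request)  # remember new cookies
+    #     jar.add_cookie_header(next_request)     # return them later
+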
+    non_word_re = re.compile(r"\W")
+    quote_re = re.compile(r"([\"\\])")
+    strict_domain_re = re.compile(r"\.?[^.]*")
+    domain_re = re.compile(r"[^.]*")
+    dots_re = re.compile(r"^\.+")
+
+    def __init__(self, policy=None):
+        """
+        See CookieJar.__doc__ for argument documentation.
+
+        """
+        if policy is None:
+            policy = DefaultCookiePolicy()
+        self._policy = policy
+
+        # lock guarding access to the cookie store (subclasses use it too)
+        self._cookies_lock = _threading.RLock()
+        self._cookies = {}
+
+        # for __getitem__ iteration in pre-2.2 Pythons
+        self._prev_getitem_index = 0
+
+    def set_policy(self, policy):
+        self._policy = policy
+
+    def _cookies_for_domain(self, domain, request):
+        cookies = []
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+        debug("Checking %s for cookies to return", domain)
+        cookies_by_path = self._cookies[domain]
+        for path in cookies_by_path.keys():
+            if not self._policy.path_return_ok(path, request):
+                continue
+            cookies_by_name = cookies_by_path[path]
+            for cookie in cookies_by_name.values():
+                if not self._policy.return_ok(cookie, request):
+                    debug("   not returning cookie")
+                    continue
+                debug("   it's a match")
+                cookies.append(cookie)
+        return cookies
+
+    def _cookies_for_request(self, request):
+        """Return a list of cookies to be returned to server."""
+        cookies = []
+        for domain in self._cookies.keys():
+            cookies.extend(self._cookies_for_domain(domain, request))
+        return cookies
+
+    def _cookie_attrs(self, cookies):
+        """Return a list of cookie-attributes to be returned to server.
+
+        like ['foo="bar"; $Path="/"', ...]
+
+        The $Version attribute is also added when appropriate (currently only
+        once per request).
+
+        """
+        # add cookies in order of most specific (ie. longest) path first
+        def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
+        cookies.sort(decreasing_size)
+
+        version_set = False
+
+        attrs = []
+        for cookie in cookies:
+            # set version of Cookie header
+            # XXX
+            # What should it be if multiple matching Set-Cookie headers have
+            #  different versions themselves?
+            # Answer: there is no answer; was supposed to be settled by
+            #  RFC 2965 errata, but that may never appear...
+            version = cookie.version
+            if not version_set:
+                version_set = True
+                if version > 0:
+                    attrs.append("$Version=%s" % version)
+
+            # quote cookie value if necessary
+            # (not for Netscape protocol, which already has any quotes
+            #  intact, due to the poorly-specified Netscape Cookie: syntax)
+            if ((cookie.value is not None) and
+                self.non_word_re.search(cookie.value) and version > 0):
+                value = self.quote_re.sub(r"\\\1", cookie.value)
+            else:
+                value = cookie.value
+
+            # add cookie-attributes to be returned in Cookie header
+            if cookie.value is None:
+                attrs.append(cookie.name)
+            else:
+                attrs.append("%s=%s" % (cookie.name, value))
+            if version > 0:
+                if cookie.path_specified:
+                    attrs.append('$Path="%s"' % cookie.path)
+                if startswith(cookie.domain, "."):
+                    domain = cookie.domain
+                    if (not cookie.domain_initial_dot and
+                        startswith(domain, ".")):
+                        domain = domain[1:]
+                    attrs.append('$Domain="%s"' % domain)
+                if cookie.port is not None:
+                    p = "$Port"
+                    if cookie.port_specified:
+                        p = p + ('="%s"' % cookie.port)
+                    attrs.append(p)
+
+        return attrs
+
+    def add_cookie_header(self, request):
+        """Add correct Cookie: header to request (urllib2.Request object).
+
+        The Cookie2 header is also added unless policy.hide_cookie2 is true.
+
+        The request object (usually a urllib2.Request instance) must support
+        the methods get_full_url, get_host, get_type, has_header, get_header,
+        header_items and add_unredirected_header, as documented by urllib2, and
+        the port attribute (the port number).  Actually,
+        RequestUpgradeProcessor will automatically upgrade your Request object
+        to one with has_header, get_header, header_items and
+        add_unredirected_header, if it lacks those methods, for compatibility
+        with pre-2.4 versions of urllib2.
+
+        """
+        debug("add_cookie_header")
+        self._policy._now = self._now = int(time.time())
+
+        req_host, erhn = eff_request_host(request)
+        strict_non_domain = (
+            self._policy.strict_ns_domain & self._policy.DomainStrictNonDomain)
+
+        cookies = self._cookies_for_request(request)
+
+        attrs = self._cookie_attrs(cookies)
+        if attrs:
+            if not request.has_header("Cookie"):
+                request.add_unredirected_header(
+                    "Cookie", string.join(attrs, "; "))
+
+        # if necessary, advertise that we know RFC 2965
+        if self._policy.rfc2965 and not self._policy.hide_cookie2:
+            for cookie in cookies:
+                if cookie.version != 1 and not request.has_header("Cookie2"):
+                    request.add_unredirected_header("Cookie2", '$Version="1"')
+                    break
+
+        self.clear_expired_cookies()
+
+    def _normalized_cookie_tuples(self, attrs_set):
+        """Return list of tuples containing normalised cookie information.
+
+        attrs_set is the list of lists of key,value pairs extracted from
+        the Set-Cookie or Set-Cookie2 headers.
+
+        Tuples are name, value, standard, rest, where name and value are the
+        cookie name and value, standard is a dictionary containing the standard
+        cookie-attributes (discard, secure, version, expires or max-age,
+        domain, path and port) and rest is a dictionary containing the rest of
+        the cookie-attributes.
+
+        """
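+        # For example, attributes parsed from
+        # "Set-Cookie: foo=bar; Path=/; spam" normalise (roughly) to:
+        #
+        #     ("foo", "bar", {"path": "/"}, {"spam": None})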
+        cookie_tuples = []
+
+        boolean_attrs = "discard", "secure"
+        value_attrs = ("version",
+                       "expires", "max-age",
+                       "domain", "path", "port",
+                       "comment", "commenturl")
+
+        for cookie_attrs in attrs_set:
+            name, value = cookie_attrs[0]
+
+            # Build dictionary of standard cookie-attributes (standard) and
+            # dictionary of other cookie-attributes (rest).
+
+            # Note: expiry time is normalised to seconds since epoch.  V0
+            # cookies should have the Expires cookie-attribute, and V1 cookies
+            # should have Max-Age, but since V1 includes RFC 2109 cookies (and
+            # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
+            # accept either (but prefer Max-Age).
+            max_age_set = False
+
+            bad_cookie = False
+
+            standard = {}
+            rest = {}
+            for k, v in cookie_attrs[1:]:
+                lc = string.lower(k)
+                # don't lose case distinction for unknown fields
+                if lc in value_attrs or lc in boolean_attrs:
+                    k = lc
+                if k in boolean_attrs and v is None:
+                    # boolean cookie-attribute is present, but has no value
+                    # (like "discard", rather than "port=80")
+                    v = True
+                if standard.has_key(k):
+                    # only first value is significant
+                    continue
+                if k == "domain":
+                    if v is None:
+                        debug("   missing value for domain attribute")
+                        bad_cookie = True
+                        break
+                    # RFC 2965 section 3.3.3
+                    v = string.lower(v)
+                if k == "expires":
+                    if max_age_set:
+                        # Prefer max-age to expires (like Mozilla)
+                        continue
+                    if v is None:
+                        debug("   missing or invalid value for expires "
+                              "attribute: treating as session cookie")
+                        continue
+                if k == "max-age":
+                    max_age_set = True
+                    try:
+                        v = int(v)
+                    except ValueError:
+                        debug("   missing or invalid (non-numeric) value for "
+                              "max-age attribute")
+                        bad_cookie = True
+                        break
+                    # convert RFC 2965 Max-Age to seconds since epoch
+                    # XXX Strictly you're supposed to follow RFC 2616
+                    #   age-calculation rules.  Remember that zero Max-Age
+                    #   is a request to discard (old and new) cookie, though.
+                    k = "expires"
+                    v = self._now + v
+                if (k in value_attrs) or (k in boolean_attrs):
+                    if (v is None and
+                        k not in ["port", "comment", "commenturl"]):
+                        debug("   missing value for %s attribute" % k)
+                        bad_cookie = True
+                        break
+                    standard[k] = v
+                else:
+                    rest[k] = v
+
+            if bad_cookie:
+                continue
+
+            cookie_tuples.append((name, value, standard, rest))
+
+        return cookie_tuples
+
+    def _cookie_from_cookie_tuple(self, tup, request):
+        # standard is dict of standard cookie-attributes, rest is dict of the
+        # rest of them
+        name, value, standard, rest = tup
+
+        domain = standard.get("domain", Absent)
+        path = standard.get("path", Absent)
+        port = standard.get("port", Absent)
+        expires = standard.get("expires", Absent)
+
+        # set the easy defaults
+        version = standard.get("version", None)
+        if version is not None: version = int(version)
+        secure = standard.get("secure", False)
+        # (discard is also set if expires is Absent)
+        discard = standard.get("discard", False)
+        comment = standard.get("comment", None)
+        comment_url = standard.get("commenturl", None)
+
+        # set default path
+        if path is not Absent and path != "":
+            path_specified = True
+            path = escape_path(path)
+        else:
+            path_specified = False
+            path = request_path(request)
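+            # e.g. (illustrative): a request path of "/foo/bar" gives a
+            # default cookie path of "/foo" for version 0 (Netscape) and
+            # "/foo/" otherwise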
+            i = string.rfind(path, "/")
+            if i != -1:
+                if version == 0:
+                    # Netscape spec parts company from reality here
+                    path = path[:i]
+                else:
+                    path = path[:i+1]
+            if len(path) == 0: path = "/"
+
+        # set default domain
+        domain_specified = domain is not Absent
+        # but first we have to remember whether it starts with a dot
+        domain_initial_dot = False
+        if domain_specified:
+            domain_initial_dot = bool(startswith(domain, "."))
+        if domain is Absent:
+            req_host, erhn = eff_request_host(request)
+            domain = erhn
+        elif not startswith(domain, "."):
+            domain = "."+domain
+
+        # set default port
+        port_specified = False
+        if port is not Absent:
+            if port is None:
+                # Port attr present, but has no value: default to request port.
+                # Cookie should then only be sent back on that port.
+                port = request_port(request)
+            else:
+                port_specified = True
+                port = re.sub(r"\s+", "", port)
+        else:
+            # No port attr present.  Cookie can be sent back on any port.
+            port = None
+
+        # set default expires and discard
+        if expires is Absent:
+            expires = None
+            discard = True
+        elif expires <= self._now:
+            # Expiry date in past is request to delete cookie.  This can't be
+            # in DefaultCookiePolicy, because can't delete cookies there.
+            try:
+                self.clear(domain, path, name)
+            except KeyError:
+                pass
+            debug("Expiring cookie, domain='%s', path='%s', name='%s'",
+                  domain, path, name)
+            return None
+
+        return Cookie(version,
+                      name, value,
+                      port, port_specified,
+                      domain, domain_specified, domain_initial_dot,
+                      path, path_specified,
+                      secure,
+                      expires,
+                      discard,
+                      comment,
+                      comment_url,
+                      rest)
+
+    def _cookies_from_attrs_set(self, attrs_set, request):
+        cookie_tuples = self._normalized_cookie_tuples(attrs_set)
+
+        cookies = []
+        for tup in cookie_tuples:
+            cookie = self._cookie_from_cookie_tuple(tup, request)
+            if cookie: cookies.append(cookie)
+        return cookies
+
+    def _process_rfc2109_cookies(self, cookies):
+        if self._policy.rfc2109_as_netscape is None:
+            rfc2109_as_netscape = not self._policy.rfc2965
+        else:
+            rfc2109_as_netscape = self._policy.rfc2109_as_netscape
+        for cookie in cookies:
+            if cookie.version == 1:
+                cookie.rfc2109 = True
+                if rfc2109_as_netscape: 
+                    # treat 2109 cookies as Netscape cookies rather than
+                    # as RFC2965 cookies
+                    cookie.version = 0
+
+    def make_cookies(self, response, request):
+        """Return sequence of Cookie objects extracted from response object.
+
+        See extract_cookies.__doc__ for the interfaces required of the
+        response and request arguments.
+
+        """
+        # get cookie-attributes for RFC 2965 and Netscape protocols
+        headers = response.info()
+        rfc2965_hdrs = getheaders(headers, "Set-Cookie2")
+        ns_hdrs = getheaders(headers, "Set-Cookie")
+
+        rfc2965 = self._policy.rfc2965
+        netscape = self._policy.netscape
+
+        if ((not rfc2965_hdrs and not ns_hdrs) or
+            (not ns_hdrs and not rfc2965) or
+            (not rfc2965_hdrs and not netscape) or
+            (not netscape and not rfc2965)):
+            return []  # no relevant cookie headers: quick exit
+
+        try:
+            cookies = self._cookies_from_attrs_set(
+                split_header_words(rfc2965_hdrs), request)
+        except:
+            reraise_unmasked_exceptions()
+            cookies = []
+
+        if ns_hdrs and netscape:
+            try:
+                # RFC 2109 and Netscape cookies
+                ns_cookies = self._cookies_from_attrs_set(
+                    parse_ns_headers(ns_hdrs), request)
+            except:
+                reraise_unmasked_exceptions()
+                ns_cookies = []
+            self._process_rfc2109_cookies(ns_cookies)
+
+            # Look for Netscape cookies (from Set-Cookie headers) that match
+            # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
+            # For each match, keep the RFC 2965 cookie and ignore the Netscape
+            # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
+            # bundled in with the Netscape cookies for this purpose, which is
+            # reasonable behaviour.
+            if rfc2965:
+                lookup = {}
+                for cookie in cookies:
+                    lookup[(cookie.domain, cookie.path, cookie.name)] = None
+
+                def no_matching_rfc2965(ns_cookie, lookup=lookup):
+                    key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
+                    return not lookup.has_key(key)
+                ns_cookies = filter(no_matching_rfc2965, ns_cookies)
+
+            if ns_cookies:
+                cookies.extend(ns_cookies)
+
+        return cookies
+
+    def set_cookie_if_ok(self, cookie, request):
+        """Set a cookie if policy says it's OK to do so.
+
+        cookie: ClientCookie.Cookie instance
+        request: see extract_cookies.__doc__ for the required interface
+
+        """
+        self._policy._now = self._now = int(time.time())
+
+        if self._policy.set_ok(cookie, request):
+            self.set_cookie(cookie)
+
+    def set_cookie(self, cookie):
+        """Set a cookie, without checking whether or not it should be set.
+
+        cookie: ClientCookie.Cookie instance
+        """
+        c = self._cookies
+        if not c.has_key(cookie.domain): c[cookie.domain] = {}
+        c2 = c[cookie.domain]
+        if not c2.has_key(cookie.path): c2[cookie.path] = {}
+        c3 = c2[cookie.path]
+        c3[cookie.name] = cookie
+
+    def extract_cookies(self, response, request):
+        """Extract cookies from response, where allowable given the request.
+
+        Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
+        object passed as argument.  Any of these headers that are found are
+        used to update the state of the object (subject to the policy.set_ok
+        method's approval).
+
+        The response object (usually the result of a call to
+        ClientCookie.urlopen, or similar) should support an info method, which
+        returns a mimetools.Message object (in fact, the 'mimetools.Message
+        object' may be any object that provides a getallmatchingheaders
+        method).
+
+        The request object (usually a urllib2.Request instance) must support
+        the methods get_full_url and get_host, as documented by urllib2, and
+        the port attribute (the port number).  The request is used to set
+        default values for cookie-attributes as well as for checking that the
+        cookie is OK to be set.
+
+        """
+        debug("extract_cookies: %s", response.info())
+        self._policy._now = self._now = int(time.time())
+
+        for cookie in self.make_cookies(response, request):
+            if self._policy.set_ok(cookie, request):
+                debug(" setting cookie: %s", cookie)
+                self.set_cookie(cookie)
+
+    def clear(self, domain=None, path=None, name=None):
+        """Clear some cookies.
+
+        Invoking this method without arguments will clear all cookies.  If
+        given a single argument, only cookies belonging to that domain will be
+        removed.  If given two arguments, cookies belonging to the specified
+        path within that domain are removed.  If given three arguments, then
+        the cookie with the specified name, path and domain is removed.
+
+        Raises KeyError if no matching cookie exists.
+
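+        For example (jar is assumed to be a CookieJar instance; domain, path
+        and name are illustrative):
+
+            jar.clear()                             # remove all cookies
+            jar.clear(".example.com")               # remove one domain
+            jar.clear(".example.com", "/", "spam")  # remove a single cookie
+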
+        """
+        if name is not None:
+            if (domain is None) or (path is None):
+                raise ValueError(
+                    "domain and path must be given to remove a cookie by name")
+            del self._cookies[domain][path][name]
+        elif path is not None:
+            if domain is None:
+                raise ValueError(
+                    "domain must be given to remove cookies by path")
+            del self._cookies[domain][path]
+        elif domain is not None:
+            del self._cookies[domain]
+        else:
+            self._cookies = {}
+
+    def clear_session_cookies(self):
+        """Discard all session cookies.
+
+        Discards all cookies held by object which had either no Max-Age or
+        Expires cookie-attribute or an explicit Discard cookie-attribute, or
+        which otherwise have ended up with a true discard attribute.  For
+        interactive browsers, the end of a session usually corresponds to
+        closing the browser window.
+
+        Note that the save method won't save session cookies anyway, unless you
+        ask otherwise by passing a true ignore_discard argument.
+
+        """
+        for cookie in self:
+            if cookie.discard:
+                self.clear(cookie.domain, cookie.path, cookie.name)
+
+    def clear_expired_cookies(self):
+        """Discard all expired cookies.
+
+        You probably don't need to call this method: expired cookies are never
+        sent back to the server (provided you're using DefaultCookiePolicy),
+        this method is called by CookieJar itself every so often, and the save
+        method won't save expired cookies anyway (unless you ask otherwise by
+        passing a true ignore_expires argument).
+
+        """
+        now = time.time()
+        for cookie in self:
+            if cookie.is_expired(now):
+                self.clear(cookie.domain, cookie.path, cookie.name)
+
+    def __getitem__(self, i):
+        if i == 0:
+            self._getitem_iterator = self.__iter__()
+        elif self._prev_getitem_index != i-1:
+            raise IndexError(
+                "CookieJar.__getitem__ only supports sequential iteration")
+        self._prev_getitem_index = i
+        try:
+            return self._getitem_iterator.next()
+        except StopIteration:
+            raise IndexError()
+
+    def __iter__(self):
+        return MappingIterator(self._cookies)
+
+    def __len__(self):
+        """Return number of contained cookies."""
+        i = 0
+        for cookie in self: i = i + 1
+        return i
+
+    def __repr__(self):
+        r = []
+        for cookie in self: r.append(repr(cookie))
+        return "<%s[%s]>" % (self.__class__, string.join(r, ", "))
+
+    def __str__(self):
+        r = []
+        for cookie in self: r.append(str(cookie))
+        return "<%s[%s]>" % (self.__class__, string.join(r, ", "))
+
+
+class LoadError(Exception): pass
+
+class FileCookieJar(CookieJar):
+    """CookieJar that can be loaded from and saved to a file.
+
+    Additional methods
+
+    save(filename=None, ignore_discard=False, ignore_expires=False)
+    load(filename=None, ignore_discard=False, ignore_expires=False)
+    revert(filename=None, ignore_discard=False, ignore_expires=False)
+
+    Additional public attributes
+
+    filename: filename for loading and saving cookies
+
+    Additional public readable attributes
+
+    delayload: request that cookies are lazily loaded from disk; this is only
+     a hint since this only affects performance, not behaviour (unless the
+     cookies on disk are changing); a CookieJar object may ignore it (in fact,
+     only MSIECookieJar lazily loads cookies at the moment)
+
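+    A usage sketch (LWPCookieJar, from _LWPCookieJar.py, is one concrete
+    subclass; the filename is illustrative):
+
+        cj = LWPCookieJar("cookies.lwp")
+        cj.load()    # raises IOError if the file doesn't exist yet
+        # ... use cj like any other CookieJar ...
+        cj.save()
+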
+    """
+
+    def __init__(self, filename=None, delayload=False, policy=None):
+        """
+        See FileCookieJar.__doc__ for argument documentation.
+
+        Cookies are NOT loaded from the named file until either the load or
+        revert method is called.
+
+        """
+        CookieJar.__init__(self, policy)
+        if filename is not None and not isstringlike(filename):
+            raise ValueError("filename must be string-like")
+        self.filename = filename
+        self.delayload = bool(delayload)
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        """Save cookies to a file.
+
+        filename: name of file in which to save cookies
+        ignore_discard: save even cookies set to be discarded
+        ignore_expires: save even cookies that have expired
+
+        The file is overwritten if it already exists, thus wiping all its
+        cookies.  Saved cookies can be restored later using the load or revert
+        methods.  If filename is not specified, self.filename is used; if
+        self.filename is None, ValueError is raised.
+
+        """
+        raise NotImplementedError()
+
+    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
+        """Load cookies from a file.
+
+        Old cookies are kept unless overwritten by newly loaded ones.
+
+        Arguments are as for .save().
+
+        If filename is not specified, self.filename is used; if self.filename
+        is None, ValueError is raised.  The named file must be in the format
+        understood by the class, or LoadError will be raised.  This format will
+        be identical to that written by the save method, unless the load format
+        is not sufficiently well understood (as is the case for MSIECookieJar).
+
+        """
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        f = open(filename)
+        try:
+            self._really_load(f, filename, ignore_discard, ignore_expires)
+        finally:
+            f.close()
+
+    def revert(self, filename=None,
+               ignore_discard=False, ignore_expires=False):
+        """Clear all cookies and reload cookies from a saved file.
+
+        Raises LoadError (or IOError) if reversion is not successful; the
+        object's state will not be altered if this happens.
+
+        """
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        old_state = copy.deepcopy(self._cookies)
+        self._cookies = {}
+        try:
+            self.load(filename, ignore_discard, ignore_expires)
+        except (LoadError, IOError):
+            self._cookies = old_state
+            raise


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ClientCookie.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ConnCache.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ConnCache.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ConnCache.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,239 @@
+"""Generic connection cache manager.
+
+Example:
+
+ from ClientCookie import ConnectionCache
+ cache = ConnectionCache()
+ cache.deposit("http", "example.com", conn)
+ conn = cache.withdraw("http", "example.com")
+
+
+The ConnectionCache class provides cache expiration.
+
+
+Copyright (C) 2004 John J Lee <jjl at pobox.com>.
+Copyright (C) 2001 Gisle Aas.
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+# Ported from libwww-perl 5.75.
+
+import time
+try:
+    from types import StringTypes
+except ImportError:
+    from types import StringType
+    StringTypes = StringType
+
+from _Util import compat_isinstance
+from _Debug import getLogger
+debug = getLogger("ClientCookie").debug
+
+class _ConnectionRecord:
+    def __init__(self, conn, scheme, key, time):
+        self.conn, self.scheme, self.key, self.time = conn, scheme, key, time
+    def __repr__(self):
+        return "%s(%s, %s, %s, %s)" % (
+            self.__class__.__name__,
+            self.conn, self.scheme, self.key, self.time)
+
+class ConnectionCache:
+    """
+    For specialized cache policy it makes sense to subclass ConnectionCache and
+    perhaps override the .deposit(), ._enforce_limits() and ._dropping()
+    methods.
+
+    """
+    def __init__(self, total_capacity=1):
+        self._conns = []
+        self._limit = {}
+        self.set_total_capacity(total_capacity)
+
+    def set_total_capacity(self, nr_connections):
+        """Set limit for number of cached connections.
+
+        Connections will start to be dropped when this limit is reached.  If 0,
+        all connections are immediately dropped.  None means no limit.
+
+        """
+        self._limit_total = nr_connections
+        self._enforce_limits()
+
+    def total_capacity(self):
+        """Return limit for number of cached connections."""
+        return self._limit_total
+
+    def set_capacity(self, scheme, nr_connections):
+        """Set limit for number of cached connections of specifed scheme.
+
+        scheme: URL scheme (eg. "http" or "ftp")
+
+        """
+        self._limit[scheme] = nr_connections
+        self._enforce_limits(scheme)
+
+    def capacity(self, scheme):
+        """Return limit for number of cached connections of specifed scheme.
+
+        scheme: URL scheme (eg. "http" or "ftp")
+
+        """
+        return self._limit[scheme]
+
+    def drop(self, checker=None, reason=None):
+        """Drop connections by some criteria.
+
+        checker: either a callable, a number, a string, or None:
+         If callable: called for each connection with arguments (conn, scheme,
+          key, deposit_time); if it returns a true value, the connection is
+          dropped (default is to drop all connections).
+         If a number: all connections untouched for the given number of seconds
+          or more are dropped.
+         If a string: all connections of the given scheme are dropped.
+         If None: all connections are dropped.
+        reason: passed on to the _dropping() method
+
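+        For example (illustrative arguments):
+
+            cache.drop(3600)    # drop connections untouched for an hour
+            cache.drop("http")  # drop all http connections
+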
+        """
+        if not callable(checker):
+            if checker is None:
+                checker = lambda cr: True  # drop all of them
+            elif compat_isinstance(checker, StringTypes):
+                scheme = checker
+                if reason is None:
+                    reason = "drop %s" % scheme
+                checker = lambda cr, scheme=scheme: cr.scheme == scheme
+            else:  # numeric
+                age_limit = checker
+                time_limit = time.time() - age_limit
+                if reason is None:
+                    reason = "older than %s" % age_limit
+                checker = lambda cr, time_limit=time_limit: cr.time < time_limit
+        if reason is None:
+            reason = "drop"
+
+        crs = []
+        for cr in self._conns:
+            if checker(cr):
+                self._dropping(cr, reason)
+            else:
+                crs.append(cr)
+        self._conns = crs
+
+    def prune(self):
+        """Drop all dead connections.
+
+        This is tested by calling the .ping() method on the connections.  If
+        the .ping() method exists and returns a false value, then the
+        connection is dropped.
+
+        """
+        # XXX HTTPConnection doesn't have a .ping() method
+        #self.drop(lambda cr: not cr.conn.ping(), "ping")
+        pass
+
+    def get_schemes(self):
+        """Return list of cached connection URL schemes."""
+        t = {}
+        for cr in self._conns:
+            t[cr.scheme] = None
+        return t.keys()
+
+    def get_connections(self, scheme=None):
+        """Return list of all connection objects with the specified URL scheme.
+
+        If no scheme is specified then all connections are returned.
+
+        """
+        cs = []
+        for cr in self._conns:
+            if scheme is None or (scheme and scheme == cr.scheme):
+                cs.append(cr.conn)
+        return cs
+
+# -------------------------------------------------------------------------
+# Methods called by handlers to try to save away connections and get them
+# back again.
+
+    def deposit(self, scheme, key, conn):
+        """Add a new connection to the cache.
+
+        scheme: URL scheme (eg. "http")
+        key: any object that can act as a dict key (usually a string or a
+         tuple)
+
+        As a side effect, other already cached connections may be dropped.
+        Multiple connections with the same scheme/key might be added.
+
+        """
+        self._conns.append(_ConnectionRecord(conn, scheme, key, time.time()))
+        self._enforce_limits(scheme)
+
+    def withdraw(self, scheme, key):
+        """Try to fetch back a connection that was previously deposited.
+
+        If no cached connection with the specified scheme/key is found, then
+        None is returned.  There is no guarantee that a deposited connection
+        can be withdrawn, as the cache manager is free to drop connections at
+        any time.
+
+        """
+        conns = self._conns
+        for i in range(len(conns)):
+            cr = conns[i]
+            if not (cr.scheme == scheme and cr.key == key):
+                continue
+            conns.pop(i)  # remove it
+            return cr.conn
+        return None
+
+# -------------------------------------------------------------------------
+# Called internally.  Subclasses might want to override these.
+
+    def _enforce_limits(self, scheme=None):
+        """Drop some cached connections, if necessary.
+
+        Called after a new connection is added (deposited) in the cache or
+        capacity limits are adjusted.
+
+        The default implementation drops connections until the specified
+        capacity limits are not exceeded.
+
+        """
+        conns = self._conns
+        if scheme:
+            schemes = [scheme]
+        else:
+            schemes = self.get_schemes()
+        for scheme in schemes:
+            limit = self._limit.get(scheme)
+            if limit is None:
+                continue
+            for i in range(len(conns)-1, -1, -1):
+                if conns[i].scheme != scheme:
+                    continue
+                limit = limit - 1
+                if limit < 0:
+                    self._dropping(
+                        conns.pop(i),
+                        "connection cache %s capacity exceeded" % scheme)
+
+        total = self._limit_total
+        if total is not None:
+            while len(conns) > total:
+                self._dropping(conns.pop(0),
+                               "connection cache total capacity exceeded")
+
+    def _dropping(self, conn_record, reason):
+        """Called when a connection is dropped.
+
+        conn_record: _ConnectionRecord instance for the dropped connection
+        reason: string describing the reason for the drop
+
+        """
+        debug("DROPPING %s [%s]" % (conn_record, reason))


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_ConnCache.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Debug.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Debug.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Debug.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,49 @@
+import sys
+
+import ClientCookie
+
+try:
+    import warnings
+except ImportError:
+    def warn(text):
+        ClientCookie.WARNINGS_STREAM.write("WARNING: "+text)
+else:
+    def warn(text):
+        warnings.warn(text, stacklevel=2)
+
+try:
+    import logging
+except ImportError:
+    NOTSET = None
+    INFO = 20
+    DEBUG = 10
+    class NullHandler:
+        def write(self, data): pass
+    class Logger:
+        def __init__(self):
+            self.level = NOTSET
+            self.handler = NullHandler()
+        def log(self, level, text, *args):
+            if args:
+                text = text % args
+            if self.level is not None and level <= self.level:
+                self.handler.write(text+"\n")
+        def debug(self, text, *args):
+            apply(self.log, (DEBUG, text)+args)
+        def info(self, text, *args):
+            apply(self.log, (INFO, text)+args)
+        def setLevel(self, lvl):
+            self.level = lvl
+        def addHandler(self, handler):
+            self.handler = handler
+    LOGGER = Logger()
+    def getLogger(name): return LOGGER
+    class StreamHandler:
+        def __init__(self, strm=None):
+            if not strm:
+                strm = sys.stderr
+            self.stream = strm
+        def write(self, data):
+            self.stream.write(data)
+else:
+    from logging import getLogger, StreamHandler, INFO, DEBUG, NOTSET


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Debug.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_HeadersUtil.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_HeadersUtil.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_HeadersUtil.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,210 @@
+"""Utility functions for HTTP header value parsing and construction.
+
+Copyright 1997-1998, Gisle Aas
+Copyright 2002-2004, John J. Lee
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+import re, string
+from types import StringType
+try:
+    from types import UnicodeType
+    STRING_TYPES = StringType, UnicodeType
+except ImportError:
+    STRING_TYPES = StringType,
+
+from _Util import startswith, endswith, http2time
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+def unmatched(match):
+    """Return unmatched part of re.Match object."""
+    start, end = match.span(0)
+    return match.string[:start]+match.string[end:]
+
+token_re =        re.compile(r"^\s*([^=\s;,]+)")
+quoted_value_re = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
+value_re =        re.compile(r"^\s*=\s*([^\s;,]*)")
+escape_re = re.compile(r"\\(.)")
+def split_header_words(header_values):
+    r"""Parse header values into a list of lists containing key,value pairs.
+
+    The function knows how to deal with ",", ";" and "=" as well as quoted
+    values after "=".  A list of space separated tokens are parsed as if they
+    were separated by ";".
+
+    If the header_values passed as argument contains multiple values, then they
+    are treated as if they were a single value separated by comma ",".
+
+    This means that this function is useful for parsing header fields that
+    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
+    the requirement for tokens).
+
+      headers           = #header
+      header            = (token | parameter) *( [";"] (token | parameter))
+
+      token             = 1*<any CHAR except CTLs or separators>
+      separators        = "(" | ")" | "<" | ">" | "@"
+                        | "," | ";" | ":" | "\" | <">
+                        | "/" | "[" | "]" | "?" | "="
+                        | "{" | "}" | SP | HT
+
+      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
+      qdtext            = <any TEXT except <">>
+      quoted-pair       = "\" CHAR
+
+      parameter         = attribute "=" value
+      attribute         = token
+      value             = token | quoted-string
+
+    Each header is represented by a list of key/value pairs.  The value for a
+    simple token (not part of a parameter) is None.  Syntactically incorrect
+    headers will not necessarily be parsed as you would want.
+
+    This is easier to describe with some examples:
+
+    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
+    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
+    >>> split_header_words(['text/html; charset="iso-8859-1"'])
+    [[('text/html', None), ('charset', 'iso-8859-1')]]
+    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
+    [[('Basic', None), ('realm', '"foobar"')]]
+
+    """
+    assert type(header_values) not in STRING_TYPES
+    result = []
+    for text in header_values:
+        orig_text = text
+        pairs = []
+        while text:
+            m = token_re.search(text)
+            if m:
+                text = unmatched(m)
+                name = m.group(1)
+                m = quoted_value_re.search(text)
+                if m:  # quoted value
+                    text = unmatched(m)
+                    value = m.group(1)
+                    value = escape_re.sub(r"\1", value)
+                else:
+                    m = value_re.search(text)
+                    if m:  # unquoted value
+                        text = unmatched(m)
+                        value = m.group(1)
+                        value = string.rstrip(value)
+                    else:
+                        # no value, a lone token
+                        value = None
+                pairs.append((name, value))
+            elif startswith(string.lstrip(text), ","):
+                # concatenated headers, as per RFC 2616 section 4.2
+                text = string.lstrip(text)[1:]
+                if pairs: result.append(pairs)
+                pairs = []
+            else:
+                # skip junk
+                non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
+                assert nr_junk_chars > 0, (
+                    "split_header_words bug: '%s', '%s', %s" %
+                    (orig_text, text, pairs))
+                text = non_junk
+        if pairs: result.append(pairs)
+    return result
+
+join_escape_re = re.compile(r"([\"\\])")
+def join_header_words(lists):
+    """Do the inverse of the conversion done by split_header_words.
+
+    Takes a list of lists of (key, value) pairs and produces a single header
+    value.  Attribute values are quoted if needed.
+
+    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
+    'text/plain; charset="iso-8859/1"'
+    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
+    'text/plain, charset="iso-8859/1"'
+
+    """
+    headers = []
+    for pairs in lists:
+        attr = []
+        for k, v in pairs:
+            if v is not None:
+                if not re.search(r"^\w+$", v):
+                    v = join_escape_re.sub(r"\\\1", v)  # escape " and \
+                    v = '"%s"' % v
+                if k is None:  # Netscape cookies may have no name
+                    k = v
+                else:
+                    k = "%s=%s" % (k, v)
+            attr.append(k)
+        if attr: headers.append(string.join(attr, "; "))
+    return string.join(headers, ", ")
+
+def parse_ns_headers(ns_headers):
+    """Ad-hoc parser for Netscape protocol cookie-attributes.
+
+    The old Netscape cookie format for Set-Cookie can for instance contain
+    an unquoted "," in the expires field, so we have to use this ad-hoc
+    parser instead of split_header_words.
+
+    XXX This may not make the best possible effort to parse all the crap
+    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
+    parser is probably better, so could do worse than following that if
+    this ever gives any trouble.
+
+    Currently, this is also used for parsing RFC 2109 cookies.
+
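+    For instance (an illustrative header):
+
+    >>> parse_ns_headers(["foo=bar; path=/; version=1"])
+    [[('foo', 'bar'), ('path', '/'), ('version', '1')]]
+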
+    """
+    known_attrs = ("expires", "domain", "path", "secure",
+                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
+                   "port", "max-age")
+
+    result = []
+    for ns_header in ns_headers:
+        pairs = []
+        version_set = False
+        params = re.split(r";\s*", ns_header)
+        for ii in range(len(params)):
+            param = params[ii]
+            param = string.rstrip(param)
+            if param == "": continue
+            if "=" not in param:
+                k, v = param, None
+            else:
+                k, v = re.split(r"\s*=\s*", param, 1)
+                k = string.lstrip(k)
+            if ii != 0:
+                lc = string.lower(k)
+                if lc in known_attrs:
+                    k = lc
+                if k == "version":
+                    # This is an RFC 2109 cookie.
+                    version_set = True
+                if k == "expires":
+                    # convert expires date to seconds since epoch
+                    if startswith(v, '"'): v = v[1:]
+                    if endswith(v, '"'): v = v[:-1]
+                    v = http2time(v)  # None if invalid
+            pairs.append((k, v))
+
+        if pairs:
+            if not version_set:
+                pairs.append(("version", "0"))
+            result.append(pairs)
+
+    return result
+
+
+def _test():
+   import doctest, _HeadersUtil
+   return doctest.testmod(_HeadersUtil)
+
+if __name__ == "__main__":
+   _test()


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_HeadersUtil.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_LWPCookieJar.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_LWPCookieJar.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_LWPCookieJar.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,188 @@
+"""Load / save to libwww-perl (LWP) format files.
+
+Actually, the format is slightly extended from that used by LWP's
+(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
+not recorded by LWP.
+
+It uses the version string "2.0", though really there isn't an LWP Cookies
+2.0 format.  This indicates that there is extra information in here
+(domain_dot and port_spec) while still being compatible with libwww-perl,
+I hope.
+
+Copyright 2002-2004 John J Lee <jjl at pobox.com>
+Copyright 1997-1999 Gisle Aas (original libwww-perl code)
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+import time, re, string
+from _ClientCookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
+     MISSING_FILENAME_TEXT, LoadError
+from _HeadersUtil import join_header_words, split_header_words
+from _Util import startswith, iso2time, time2isoz
+from _Debug import getLogger
+debug = getLogger("ClientCookie").debug
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+def lwp_cookie_str(cookie):
+    """Return string representation of Cookie in an the LWP cookie file format.
+
+    Actually, the format is extended a bit -- see module docstring.
+
+    """
+    h = [(cookie.name, cookie.value),
+         ("path", cookie.path),
+         ("domain", cookie.domain)]
+    if cookie.port is not None: h.append(("port", cookie.port))
+    if cookie.path_specified: h.append(("path_spec", None))
+    if cookie.port_specified: h.append(("port_spec", None))
+    if cookie.domain_initial_dot: h.append(("domain_dot", None))
+    if cookie.secure: h.append(("secure", None))
+    if cookie.expires: h.append(("expires",
+                               time2isoz(float(cookie.expires))))
+    if cookie.discard: h.append(("discard", None))
+    if cookie.comment: h.append(("comment", cookie.comment))
+    if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
+    if cookie.rfc2109: h.append(("rfc2109", None))
+
+    keys = cookie.nonstandard_attr_keys()
+    keys.sort()
+    for k in keys:
+        h.append((k, str(cookie.get_nonstandard_attr(k))))
+
+    h.append(("version", str(cookie.version)))
+
+    return join_header_words([h])
+
+class LWPCookieJar(FileCookieJar):
+    """
+    The LWPCookieJar saves a sequence of"Set-Cookie3" lines.
+    "Set-Cookie3" is the format used by the libwww-perl libary, not known
+    to be compatible with any browser, but which is easy to read and
+    doesn't lose information about RFC 2965 cookies.
+
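+    A saved line looks something like this (illustrative cookie):
+
+        Set-Cookie3: spam=eggs; path="/"; domain=".example.com"; path_spec; discard; version=0
+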
+    Additional methods
+
+    as_lwp_str(ignore_discard=True, ignore_expires=True)
+
+    """
+
+    magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
+
+    def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
+        """Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
+
+        ignore_discard and ignore_expires: see docstring for FileCookieJar.save
+
+        """
+        now = time.time()
+        r = []
+        for cookie in self:
+            if not ignore_discard and cookie.discard:
+                debug("   Not saving %s: marked for discard", cookie.name)
+                continue
+            if not ignore_expires and cookie.is_expired(now):
+                debug("   Not saving %s: expired", cookie.name)
+                continue
+            r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
+        return string.join(r+[""], "\n")
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        f = open(filename, "w")
+        try:
+            debug("Saving LWP cookies file")
+            # There really isn't an LWP Cookies 2.0 format, but this indicates
+            # that there is extra information in here (domain_dot and
+            # port_spec) while still being compatible with libwww-perl, I hope.
+            f.write("#LWP-Cookies-2.0\n")
+            f.write(self.as_lwp_str(ignore_discard, ignore_expires))
+        finally:
+            f.close()
+
+    def _really_load(self, f, filename, ignore_discard, ignore_expires):
+        magic = f.readline()
+        if not re.search(self.magic_re, magic):
+            msg = "%s does not seem to contain cookies" % filename
+            raise LoadError(msg)
+
+        now = time.time()
+
+        header = "Set-Cookie3:"
+        boolean_attrs = ("port_spec", "path_spec", "domain_dot",
+                         "secure", "discard", "rfc2109")
+        value_attrs = ("version",
+                       "port", "path", "domain",
+                       "expires",
+                       "comment", "commenturl")
+
+        try:
+            while 1:
+                line = f.readline()
+                if line == "": break
+                if not startswith(line, header):
+                    continue
+                line = string.strip(line[len(header):])
+
+                for data in split_header_words([line]):
+                    name, value = data[0]
+                    standard = {}
+                    rest = {}
+                    for k in boolean_attrs:
+                        standard[k] = False
+                    for k, v in data[1:]:
+                        if k is not None:
+                            lc = string.lower(k)
+                        else:
+                            lc = None
+                        # don't lose case distinction for unknown fields
+                        if (lc in value_attrs) or (lc in boolean_attrs):
+                            k = lc
+                        if k in boolean_attrs:
+                            if v is None: v = True
+                            standard[k] = v
+                        elif k in value_attrs:
+                            standard[k] = v
+                        else:
+                            rest[k] = v
+
+                    h = standard.get
+                    expires = h("expires")
+                    discard = h("discard")
+                    if expires is not None:
+                        expires = iso2time(expires)
+                    if expires is None:
+                        discard = True
+                    domain = h("domain")
+                    domain_specified = startswith(domain, ".")
+                    c = Cookie(h("version"), name, value,
+                               h("port"), h("port_spec"),
+                               domain, domain_specified, h("domain_dot"),
+                               h("path"), h("path_spec"),
+                               h("secure"),
+                               expires,
+                               discard,
+                               h("comment"),
+                               h("commenturl"),
+                               rest,
+                               h("rfc2109"),
+                               ) 
+                    if not ignore_discard and c.discard:
+                        continue
+                    if not ignore_expires and c.is_expired(now):
+                        continue
+                    self.set_cookie(c)
+        except:
+            reraise_unmasked_exceptions((IOError,))
+            raise LoadError("invalid Set-Cookie3 format file %s" % filename)
+


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_LWPCookieJar.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIECookieJar.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIECookieJar.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIECookieJar.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,393 @@
+"""Microsoft Internet Explorer cookie loading on Windows.
+
+Copyright 2002-2003 Johnny Lee <typo_pl at hotmail.com> (MSIE Perl code)
+Copyright 2002-2004 John J Lee <jjl at pobox.com> (The Python port)
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+# XXX names and comments are not great here
+
+import os, re, string, time, struct
+if os.name == "nt":
+    import _winreg
+
+from _ClientCookie import FileCookieJar, CookieJar, Cookie, \
+     MISSING_FILENAME_TEXT, LoadError
+from _Util import startswith
+from _Debug import getLogger
+debug = getLogger("ClientCookie").debug
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+
+def regload(path, leaf):
+    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
+                          _winreg.KEY_ALL_ACCESS)
+    try:
+        value = _winreg.QueryValueEx(key, leaf)[0]
+    except WindowsError:
+        value = None
+    return value
+
+WIN32_EPOCH = 0x019db1ded53e8000L  # 1970 Jan 01 00:00:00 in Win32 FILETIME
+
+def epoch_time_offset_from_win32_filetime(filetime):
+    """Convert from win32 filetime to seconds-since-epoch value.
+
+    MSIE stores create and expire times as Win32 FILETIME, which is 64
+    bits of 100 nanosecond intervals since Jan 01 1601.
+
+    ClientCookie expects time as a 32-bit value expressed in seconds since
+    the epoch (Jan 01 1970).
+
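+    For example (sketch), one second's worth of 100ns ticks past the epoch
+    converts to one second:
+
+    >>> epoch_time_offset_from_win32_filetime(WIN32_EPOCH + 10000000L)
+    1L
+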
+    """
+    if filetime < WIN32_EPOCH:
+        raise ValueError("filetime (%d) is before epoch (%d)" %
+                         (filetime, WIN32_EPOCH))
+
+    return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
+
+def binary_to_char(c): return "%02X" % ord(c)
+def binary_to_str(d): return string.join(map(binary_to_char, list(d)), "")
+
+class MSIEBase:
+    magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
+    padding = "\x0d\xf0\xad\x0b"
+
+    msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
+    cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
+                           "(.+\@[\x21-\xFF]+\.txt)")
+
+    # path under HKEY_CURRENT_USER from which to get location of index.dat
+    reg_path = r"software\microsoft\windows" \
+               r"\currentversion\explorer\shell folders"
+    reg_key = "Cookies"
+
+    def __init__(self):
+        self._delayload_domains = {}
+
+    def _delayload_domain(self, domain):
+        # if necessary, lazily load cookies for this domain
+        delayload_info = self._delayload_domains.get(domain)
+        if delayload_info is not None:
+            cookie_file, ignore_discard, ignore_expires = delayload_info
+            try:
+                self.load_cookie_data(cookie_file,
+                                      ignore_discard, ignore_expires)
+            except (LoadError, IOError):
+                debug("error reading cookie file, skipping: %s", cookie_file)
+            else:
+                del self._delayload_domains[domain]
+
+    def _load_cookies_from_file(self, filename):
+        debug("Loading MSIE cookies file: %s", filename)
+        cookies = []
+
+        cookies_fh = open(filename)
+
+        try:
+            while 1:
+                key = cookies_fh.readline()
+                if key == "": break
+
+                rl = cookies_fh.readline
+                def getlong(rl=rl): return long(rl().rstrip())
+                def getstr(rl=rl): return rl().rstrip()
+
+                key = key.rstrip()
+                value = getstr()
+                domain_path = getstr()
+                flags = getlong()  # 0x2000 bit is for secure I think
+                lo_expire = getlong()
+                hi_expire = getlong()
+                lo_create = getlong()
+                hi_create = getlong()
+                sep = getstr()
+
+                if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
+                          hi_create, lo_create, sep) or (sep != "*"):
+                    break
+
+                m = self.msie_domain_re.search(domain_path)
+                if m:
+                    domain = m.group(1)
+                    path = m.group(2)
+
+                    cookies.append({"KEY": key, "VALUE": value, "DOMAIN": domain,
+                                    "PATH": path, "FLAGS": flags, "HIXP": hi_expire,
+                                    "LOXP": lo_expire, "HICREATE": hi_create,
+                                    "LOCREATE": lo_create})
+        finally:
+            cookies_fh.close()
+
+        return cookies
+
+    def load_cookie_data(self, filename,
+                         ignore_discard=False, ignore_expires=False):
+        """Load cookies from file containing actual cookie data.
+
+        Old cookies are kept unless overwritten by newly loaded ones.
+
+        You should not call this method if the delayload attribute is set.
+
+        I think each of these files contains all cookies for one user, domain,
+        and path.
+
+        filename: file containing cookies -- usually found in a file like
+         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt
+
+        """
+        now = int(time.time())
+
+        cookie_data = self._load_cookies_from_file(filename)
+
+        for cookie in cookie_data:
+            flags = cookie["FLAGS"]
+            secure = ((flags & 0x2000) != 0)
+            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
+            expires = epoch_time_offset_from_win32_filetime(filetime)
+            if expires < now:
+                discard = True
+            else:
+                discard = False
+            domain = cookie["DOMAIN"]
+            initial_dot = startswith(domain, ".")
+            if initial_dot:
+                domain_specified = True
+            else:
+                # MSIE 5 does not record whether the domain cookie-attribute
+                # was specified.
+                # Assuming it wasn't is conservative, because with strict
+                # domain matching this will match less frequently; with regular
+                # Netscape tail-matching, this will match at exactly the same
+                # times that domain_specified = True would.  It also means we
+                # don't have to prepend a dot to achieve consistency with our
+                # own & Mozilla's domain-munging scheme.
+                domain_specified = False
+
+            # assume path_specified is false
+            # XXX is there other stuff in here? -- eg. comment, commentURL?
+            c = Cookie(0,
+                       cookie["KEY"], cookie["VALUE"],
+                       None, False,
+                       domain, domain_specified, initial_dot,
+                       cookie["PATH"], False,
+                       secure,
+                       expires,
+                       discard,
+                       None,
+                       None,
+                       {"flags": flags})
+            if not ignore_discard and c.discard:
+                continue
+            if not ignore_expires and c.is_expired(now):
+                continue
+            CookieJar.set_cookie(self, c)
+
+    def load_from_registry(self, ignore_discard=False, ignore_expires=False,
+                           username=None):
+        """
+        username: only required on win9x
+
+        """
+        cookies_dir = regload(self.reg_path, self.reg_key)
+        filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
+        self.load(filename, ignore_discard, ignore_expires, username)
+
+    def _really_load(self, index, filename, ignore_discard, ignore_expires,
+                     username):
+        now = int(time.time())
+
+        if username is None:
+            username = string.lower(os.environ['USERNAME'])
+
+        cookie_dir = os.path.dirname(filename)
+
+        data = index.read(256)
+        if len(data) != 256:
+            raise LoadError("%s file is too short" % filename)
+
+        # Cookies' index.dat file starts with 32 bytes of signature
+        # followed by an offset to the first record, stored as a little-
+        # endian DWORD.
+        sig, size, data = data[:32], data[32:36], data[36:]
+        size = struct.unpack("<L", size)[0]
+
+        # check that sig is valid
+        if not self.magic_re.match(sig) or size != 0x4000:
+            raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
+                          (str(filename), sig, size))
+
+        # skip to start of first record
+        index.seek(size, 0)
+
+        sector = 128  # size of sector in bytes
+
+        while 1:
+            data = ""
+
+            # Cookies are usually in two contiguous sectors, so read in two
+            # sectors and adjust if not a Cookie.
+            to_read = 2 * sector
+            d = index.read(to_read)
+            if len(d) != to_read:
+                break
+            data = data + d
+
+            # Each record starts with a 4-byte signature and a count
+            # (little-endian DWORD) of sectors for the record.
+            sig, size, data = data[:4], data[4:8], data[8:]
+            size = struct.unpack("<L", size)[0]
+
+            to_read = (size - 2) * sector
+
+##             from urllib import quote
+##             print "data", quote(data)
+##             print "sig", quote(sig)
+##             print "size in sectors", size
+##             print "size in bytes", size*sector
+##             print "size in units of 16 bytes", (size*sector) / 16
+##             print "size to read in bytes", to_read
+##             print
+
+            if sig != "URL ":
+                assert (sig in ("HASH", "LEAK",
+                                self.padding, "\x00\x00\x00\x00"),
+                        "unrecognized MSIE index.dat record: %s" %
+                        binary_to_str(sig))
+                if sig == "\x00\x00\x00\x00":
+                    # assume we've got all the cookies, and stop
+                    break
+                if sig == self.padding:
+                    continue
+                # skip the rest of this record
+                assert to_read >= 0
+                if size != 2:
+                    assert to_read != 0
+                    index.seek(to_read, 1)
+                continue
+
+            # read in rest of record if necessary
+            if size > 2:
+                more_data = index.read(to_read)
+                if len(more_data) != to_read: break
+                data = data + more_data
+
+            cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
+                         "(%s\@[\x21-\xFF]+\.txt)" % username)
+            m = re.search(cookie_re, data, re.I)
+            if m:
+                cookie_file = os.path.join(cookie_dir, m.group(2))
+                if not self.delayload:
+                    try:
+                        self.load_cookie_data(cookie_file,
+                                              ignore_discard, ignore_expires)
+                    except (LoadError, IOError):
+                        debug("error reading cookie file, skipping: %s",
+                              cookie_file)
+                else:
+                    domain = m.group(1)
+                    i = domain.find("/")
+                    if i != -1:
+                        domain = domain[:i]
+
+                    self._delayload_domains[domain] = (
+                        cookie_file, ignore_discard, ignore_expires)
+
+
+class MSIECookieJar(MSIEBase, FileCookieJar):
+    """FileCookieJar that reads from the Windows MSIE cookies database.
+
+    MSIECookieJar can read the cookie files of Microsoft Internet Explorer
+    (MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
+    Windows 98.  Other configurations may also work, but are untested.  Saving
+    cookies in MSIE format is NOT supported.  If you save cookies, they'll be
+    in the usual Set-Cookie3 format, which you can read back in using an
+    instance of the plain old CookieJar class.  Don't save using the same
+    filename that you loaded cookies from, because you may succeed in
+    clobbering your MSIE cookies index file!
+
+    You should be able to have ClientCookie share Internet Explorer's cookies like
+    this (note you need to supply a username to load_from_registry if you're on
+    Windows 9x or Windows ME):
+
+    cj = MSIECookieJar(delayload=1)
+    # find cookies index file in registry and load cookies from it
+    cj.load_from_registry()
+    opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
+    response = opener.open("http://example.com/")
+
+    Iterating over a delayloaded MSIECookieJar instance will not cause any
+    cookies to be read from disk.  To force reading of all cookies from disk,
+    call read_all_cookies.  Note that the following methods iterate over self:
+    clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
+    and as_string.
+
+    Additional methods:
+
+    load_from_registry(ignore_discard=False, ignore_expires=False,
+                       username=None)
+    load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
+    read_all_cookies()
+
+    """
+    def __init__(self, filename=None, delayload=False, policy=None):
+        MSIEBase.__init__(self)
+        FileCookieJar.__init__(self, filename, delayload, policy)
+
+    def set_cookie(self, cookie):
+        if self.delayload:
+            self._delayload_domain(cookie.domain)
+        CookieJar.set_cookie(self, cookie)
+
+    def _cookies_for_request(self, request):
+        """Return a list of cookies to be returned to server."""
+        domains = self._cookies.copy()
+        domains.update(self._delayload_domains)
+        domains = domains.keys()
+
+        cookies = []
+        for domain in domains:
+            cookies.extend(self._cookies_for_domain(domain, request))
+        return cookies
+
+    def _cookies_for_domain(self, domain, request):
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+        debug("Checking %s for cookies to return", domain)
+        if self.delayload:
+            self._delayload_domain(domain)
+        return CookieJar._cookies_for_domain(self, domain, request)
+
+    def read_all_cookies(self):
+        """Eagerly read in all cookies."""
+        if self.delayload:
+            for domain in self._delayload_domains.keys():
+                self._delayload_domain(domain)
+
+    def load(self, filename, ignore_discard=False, ignore_expires=False,
+             username=None):
+        """Load cookies from an MSIE 'index.dat' cookies index file.
+
+        filename: full path to cookie index file
+        username: only required on win9x
+
+        """
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        index = open(filename, "rb")
+
+        try:
+            self._really_load(index, filename, ignore_discard, ignore_expires,
+                              username)
+        finally:
+            index.close()


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIECookieJar.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIEDBCookieJar.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIEDBCookieJar.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIEDBCookieJar.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,140 @@
+"""Persistent CookieJar based on MS Internet Explorer cookie database.
+
+Copyright 2003-2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+**********************************************************************
+THIS DOESN'T WORK!
+
+It's just a sketch, to check the base class is OK.
+
+**********************************************************************
+
+"""
+
+from ClientCookie import MSIEBase, CookieJar
+from _Util import time2netscape
+
+def set_cookie_hdr_from_cookie(cookie):
+    params = []
+    if cookie.name is not None:
+        params.append("%s=%s" % cookie.name, cookie.value)
+    else:
+        params.append(cookie.name)
+    if cookie.expires:
+        params.append("expires=" % time2netscape(cookie.expires))
+    if cookie.domain_specified:
+        params.append("Domain=%s" % cookie.domain)
+    if cookie.path_specified:
+        params.append("path=%s" % cookie.path)
+    if cookie.port_specified:
+        if cookie.port is None:
+            params.append("Port")
+        else:
+            params.append("Port=%s" % cookie.port)
+    if cookie.secure:
+        params.append("secure")
+##     if cookie.comment:
+##         params.append("Comment=%s" % cookie.comment)
+##     if cookie.comment_url:
+##         params.append("CommentURL=%s" % cookie.comment_url)
+    return "; ".join(params)
+
+class MSIEDBCookieJar(MSIEBase, CookieJar):
+    """A CookieJar that relies on MS Internet Explorer's cookie database.
+
+    XXX Require ctypes or write C extension?  win32all probably requires
+    the latter.
+
+    **********************************************************************
+    THIS DOESN'T WORK!
+
+    It's just a sketch, to check the base class is OK.
+
+    **********************************************************************
+
+    MSIEDBCookieJar, unlike MSIECookieJar, keeps no state for itself, but
+    relies on the MS Internet Explorer's cookie database.  It uses the win32
+    API functions InternetGetCookie() and InternetSetCookie(), from the wininet
+    library.
+
+    Note that MSIE itself may impose additional conditions on cookie processing
+    on top of those applied by CookiePolicy.  For cookie setting, the class tries
+    to foil that by providing the request details and Set-Cookie header it
+    thinks MSIE wants to see.  For returning cookies to the server, it's up to
+    MSIE.
+
+    Note that session cookies ARE NOT written to disk and won't be accessible
+    from other processes.  .clear_session_cookies() has no effect.
+
+    .clear_expired_cookies() has no effect: MSIE is responsible for this.
+
+    .clear() will raise NotImplementedError unless all three arguments are
+    given.
+
+    """
+    def __init__(self, policy=None):
+        MSIEBase.__init__(self)
+        CookieJar.__init__(self, policy)
+    def clear_session_cookies(self): pass
+    def clear_expired_cookies(self): pass
+    def clear(self, domain=None, path=None, name=None):
+        if None in [domain, path, name]:
+            raise NotImplementedError()
+        # XXXX
+        url = self._fake_url(domain, path)
+        hdr = "%s=; domain=%s; path=%s; max-age=0" % (name, domain, path)
+        r = windll.InternetSetCookie(url, None, hdr)
+        # XXX return value of InternetSetCookie?
+    def _fake_url(self, domain, path):
+        # to convince MSIE that Set-Cookie is OK
+        return "http://%s%s" % (domain, path)
+    def set_cookie(self, cookie):
+        # XXXX
+        url = self._fake_url(cookie.domain, cookie.path)
+        r = windll.InternetSetCookie(
+            url, None, set_cookie_hdr_from_cookie(cookie))
+        # XXX return value of InternetSetCookie?
+    def add_cookie_header(self, request, unverifiable=False):
+        # XXXX
+        cookie_header = windll.InternetGetCookie(request.get_full_url())
+        # XXX return value of InternetGetCookie?
+        request.add_unredirected_header("Cookie", cookie_header)
+    def __iter__(self):
+        self._load_index_dat()
+        return CookieJar.__iter__(self)
+    def _cookies_for_request(self, request):
+        raise NotImplementedError()  # XXXX
+    def _cookies_for_domain(self, domain, request):
+        #raise NotImplementedError()  # XXXX
+        debug("Checking %s for cookies to return", domain)
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+
+        # XXXX separate out actual loading of cookie data, so only index.dat is
+        #  read in ._load_index_dat(), and ._really_load() calls that, then
+        #  ._delayload_domain for all domains if not self.delayload.
+        #  We then just call ._load_index_dat()
+        self.delayload = False
+        self._really_load()
+
+        cookies_by_path = self._cookies.get(domain)
+        if cookies_by_path is None:
+            return []
+
+        cookies = []
+        for path in cookies_by_path.keys():
+            if not self._policy.path_return_ok(path, request):
+                continue
+            for name, cookie in cookies_by_path[path].items():
+                if not self._policy.return_ok(cookie, request):
+                    debug("   not returning cookie")
+                    continue
+                debug("   it's a match")
+                cookies.append(cookie)
+
+        return cookies
+


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MSIEDBCookieJar.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MozillaCookieJar.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MozillaCookieJar.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MozillaCookieJar.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,173 @@
+"""Mozilla / Netscape cookie loading / saving.
+
+Copyright 2002-2004 John J Lee <jjl at pobox.com>
+Copyright 1997-1999 Gisle Aas (original libwww-perl code)
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+import re, string, time
+
+from _ClientCookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
+     MISSING_FILENAME_TEXT, LoadError
+from _Util import startswith, endswith
+from _Debug import getLogger
+debug = getLogger("ClientCookie").debug
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+try: issubclass(Exception(), (Exception,))
+except TypeError:
+    real_issubclass = issubclass
+    from _Util import compat_issubclass
+    issubclass = compat_issubclass
+    del compat_issubclass
+
+
+class MozillaCookieJar(FileCookieJar):
+    """
+
+    WARNING: you may want to back up your browser's cookies file if you use
+    this class to save cookies.  I *think* it works, but there have been
+    bugs in the past!
+
+    This class differs from CookieJar only in the format it uses to save and
+    load cookies to and from a file.  This class uses the Mozilla/Netscape
+    `cookies.txt' format.  lynx uses this file format, too.
+
+    Don't expect cookies saved while the browser is running to be noticed by
+    the browser (in fact, Mozilla on unix will overwrite your saved cookies if
+    you change them on disk while it's running; on Windows, you probably can't
+    save at all while the browser is running).
+
+    Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
+    Netscape cookies on saving.
+
+    In particular, the cookie version and port number information is lost,
+    together with information about whether or not Path, Port and Discard were
+    specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
+    domain as set in the HTTP header started with a dot (yes, I'm aware some
+    domains in Netscape files start with a dot and some don't -- trust me, you
+    really don't want to know any more about this).
+
+    Note that though Mozilla and Netscape use the same format, they use
+    slightly different headers.  The class saves cookies using the Netscape
+    header by default (Mozilla can cope with that).
+
+    """
+    magic_re = "#( Netscape)? HTTP Cookie File"
+    header = """\
+    # Netscape HTTP Cookie File
+    # http://www.netscape.com/newsref/std/cookie_spec.html
+    # This is a generated file!  Do not edit.
+
+"""
+
+    def _really_load(self, f, filename, ignore_discard, ignore_expires):
+        now = time.time()
+
+        magic = f.readline()
+        if not re.search(self.magic_re, magic):
+            f.close()
+            raise LoadError(
+                "%s does not look like a Netscape format cookies file" %
+                filename)
+
+        try:
+            while 1:
+                line = f.readline()
+                if line == "": break
+
+                # last field may be absent, so keep any trailing tab
+                if endswith(line, "\n"): line = line[:-1]
+
+                # skip comments and blank lines XXX what is $ for?
+                if (startswith(string.strip(line), "#") or
+                    startswith(string.strip(line), "$") or
+                    string.strip(line) == ""):
+                    continue
+
+                domain, domain_specified, path, secure, expires, name, value = \
+                        string.split(line, "\t")
+                secure = (secure == "TRUE")
+                domain_specified = (domain_specified == "TRUE")
+                if name == "":
+                    name = value
+                    value = None
+
+                initial_dot = startswith(domain, ".")
+                assert domain_specified == initial_dot
+
+                discard = False
+                if expires == "":
+                    expires = None
+                    discard = True
+
+                # assume path_specified is false
+                c = Cookie(0, name, value,
+                           None, False,
+                           domain, domain_specified, initial_dot,
+                           path, False,
+                           secure,
+                           expires,
+                           discard,
+                           None,
+                           None,
+                           {})
+                if not ignore_discard and c.discard:
+                    continue
+                if not ignore_expires and c.is_expired(now):
+                    continue
+                self.set_cookie(c)
+
+        except:
+            reraise_unmasked_exceptions((IOError,))
+            raise LoadError("invalid Netscape format file %s: %s" %
+                          (filename, line))
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        f = open(filename, "w")
+        try:
+            debug("Saving Netscape cookies.txt file")
+            f.write(self.header)
+            now = time.time()
+            for cookie in self:
+                if not ignore_discard and cookie.discard:
+                    debug("   Not saving %s: marked for discard", cookie.name)
+                    continue
+                if not ignore_expires and cookie.is_expired(now):
+                    debug("   Not saving %s: expired", cookie.name)
+                    continue
+                if cookie.secure: secure = "TRUE"
+                else: secure = "FALSE"
+                if startswith(cookie.domain, "."): initial_dot = "TRUE"
+                else: initial_dot = "FALSE"
+                if cookie.expires is not None:
+                    expires = str(cookie.expires)
+                else:
+                    expires = ""
+                if cookie.value is None:
+                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                    # with no name, whereas cookielib regards it as a
+                    # cookie with no value.
+                    name = ""
+                    value = cookie.name
+                else:
+                    name = cookie.name
+                    value = cookie.value
+                f.write(
+                    string.join([cookie.domain, initial_dot, cookie.path,
+                                 secure, expires, name, value], "\t")+
+                    "\n")
+        finally:
+            f.close()
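+
+# A short round-trip sketch (filenames are illustrative):
+#
+#     cj = MozillaCookieJar()
+#     cj.load("cookies.txt", ignore_discard=True)
+#     cj.save("cookies-backup.txt", ignore_discard=True)
+#
+# Each data line in the file holds seven tab-separated fields: domain,
+# initial-dot flag, path, secure flag, expiry time, name and value.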


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_MozillaCookieJar.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Opener.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Opener.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Opener.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,197 @@
+"""Integration with Python standard library module urllib2: OpenerDirector
+class.
+
+Copyright 2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+import os, tempfile, urllib2, string, bisect, urlparse
+from urllib import url2pathname
+
+from _Util import startswith, isstringlike
+from _Request import Request
+
+def methnames(obj):
+    """Return method names of class instance.
+
+    dir(obj) doesn't work consistently across Python versions; this does.
+
+    """
+    return methnames_of_instance_as_dict(obj).keys()
+
+def methnames_of_instance_as_dict(inst):
+    names = {}
+    names.update(methnames_of_class_as_dict(inst.__class__))
+    for methname in dir(inst):
+        candidate = getattr(inst, methname)
+        if callable(candidate):
+            names[methname] = None
+    return names
+
+def methnames_of_class_as_dict(klass):
+    names = {}
+    for methname in dir(klass):
+        candidate = getattr(klass, methname)
+        if callable(candidate):
+            names[methname] = None
+    for baseclass in klass.__bases__:
+        names.update(methnames_of_class_as_dict(baseclass))
+    return names
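+
+# Illustrative only (class and method names are made up):
+#
+#     class A:
+#         def http_open(self): pass
+#     class B(A):
+#         def http_error_302(self): pass
+#     names = methnames(B())
+#     # both "http_open" and "http_error_302" are in names, including the
+#     # method inherited from A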
+
+
+class OpenerMixin:
+    def _request(self, url_or_req, data):
+        if isstringlike(url_or_req):
+            req = Request(url_or_req, data)
+        else:
+            # already a urllib2.Request or ClientCookie.Request instance
+            req = url_or_req
+            if data is not None:
+                req.add_data(data)
+        return req
+
+    def retrieve(self, fullurl, filename=None, reporthook=None, data=None):
+        """Returns (filename, headers).
+
+        For remote objects, the default filename will refer to a temporary
+        file.
+
+        """
+        req = self._request(fullurl, data)
+        type_ = req.get_type()
+        fp = self.open(req)
+        headers = fp.info()
+        if filename is None and type_ == 'file':
+            return url2pathname(req.get_selector()), headers
+        if filename:
+            tfp = open(filename, 'wb')
+        else:
+            path = urlparse.urlparse(fullurl)[2]
+            suffix = os.path.splitext(path)[1]
+            tfp = tempfile.TemporaryFile("wb", suffix=suffix)
+        result = filename, headers
+        bs = 1024*8
+        size = -1
+        read = 0
+        blocknum = 1
+        if reporthook:
+            if headers.has_key("content-length"):
+                size = int(headers["Content-Length"])
+            reporthook(0, bs, size)
+        while 1:
+            block = fp.read(bs)
+            read += len(block)
+            if reporthook:
+                reporthook(blocknum, bs, size)
+            blocknum = blocknum + 1
+            if not block:
+                break
+            tfp.write(block)
+        fp.close()
+        tfp.close()
+        del fp
+        del tfp
+        if size>=0 and read<size:
+            raise IOError("incomplete retrieval error",
+                          "got only %d bytes out of %d" % (read,size))
+        return result
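+
+    # A minimal usage sketch (URL and filename are illustrative; build_opener
+    # is this package's factory for a ready-made OpenerDirector):
+    #
+    #     opener = ClientCookie.build_opener()
+    #     fn, headers = opener.retrieve("http://example.com/data.bin",
+    #                                   filename="data.bin",
+    #                                   reporthook=lambda n, bs, size: None)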
+
+
+class OpenerDirector(urllib2.OpenerDirector, OpenerMixin):
+    def __init__(self):
+        urllib2.OpenerDirector.__init__(self)
+        self.process_response = {}
+        self.process_request = {}
+
+    def add_handler(self, handler):
+        added = False
+        for meth in methnames(handler):
+            i = string.find(meth, "_")
+            protocol = meth[:i]
+            condition = meth[i+1:]
+
+            if startswith(condition, "error"):
+                j = string.find(meth[i+1:], "_") + i + 1
+                kind = meth[j+1:]
+                try:
+                    kind = int(kind)
+                except ValueError:
+                    pass
+                lookup = self.handle_error.get(protocol, {})
+                self.handle_error[protocol] = lookup
+            elif (condition == "open" and
+                  protocol not in ["do", "proxy"]):  # hack -- see below
+                kind = protocol
+                lookup = self.handle_open
+            elif (condition in ["response", "request"] and
+                  protocol != "redirect"):  # yucky hack
+                # hack above is to fix HTTPRedirectHandler problem, which
+                # appears to the line above to be a processor because of its
+                # redirect_request method :-((
+                kind = protocol
+                lookup = getattr(self, "process_"+condition)
+            else:
+                continue
+
+            if lookup.has_key(kind):
+                bisect.insort(lookup[kind], handler)
+            else:
+                lookup[kind] = [handler]
+            added = True
+            continue
+
+        if added:
+            # XXX why does self.handlers need to be sorted?
+            bisect.insort(self.handlers, handler)
+            handler.add_parent(self)
+
+    def open(self, fullurl, data=None):
+        req = self._request(fullurl, data)
+        type_ = req.get_type()
+
+        # pre-process request
+        # XXX should we allow a Processor to change the type (URL
+        #   scheme) of the request?
+        meth_name = type_+"_request"
+        for processor in self.process_request.get(type_, []):
+            meth = getattr(processor, meth_name)
+            req = meth(req)
+
+        response = urllib2.OpenerDirector.open(self, req, data)
+
+        # post-process response
+        meth_name = type_+"_response"
+        for processor in self.process_response.get(type_, []):
+            meth = getattr(processor, meth_name)
+            response = meth(req, response)
+
+        return response
+
+    def error(self, proto, *args):
+        if proto in ['http', 'https']:
+            # XXX http[s] protocols are special-cased
+            dict = self.handle_error['http']  # https is no different from http
+            proto = args[2]  # YUCK!
+            meth_name = 'http_error_%s' % proto
+            http_err = 1
+            orig_args = args
+        else:
+            dict = self.handle_error
+            meth_name = proto + '_error'
+            http_err = 0
+        args = (dict, proto, meth_name) + args
+        result = apply(self._call_chain, args)
+        if result:
+            return result
+
+        if http_err:
+            args = (dict, 'default', 'http_error_default') + orig_args
+            return apply(self._call_chain, args)


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Opener.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Request.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Request.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Request.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,73 @@
+"""Integration with Python standard library module urllib2: Request class.
+
+Copyright 2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+import urllib2, string
+
+from _ClientCookie import request_host
+
+
+class Request(urllib2.Request):
+    def __init__(self, url, data=None, headers={},
+             origin_req_host=None, unverifiable=False):
+        urllib2.Request.__init__(self, url, data, headers)
+        self.unredirected_hdrs = {}
+
+        # All the terminology below comes from RFC 2965.
+        self.unverifiable = unverifiable
+        # Set request-host of origin transaction.
+        # The origin request-host is needed in order to decide whether
+        # unverifiable sub-requests (automatic redirects, images embedded
+        # in HTML, etc.) are to third-party hosts.  If they are, the
+        # resulting transactions might need to be conducted with cookies
+        # turned off.
+        if origin_req_host is None:
+            origin_req_host = request_host(self)
+        self.origin_req_host = origin_req_host
+
+    def get_origin_req_host(self):
+        return self.origin_req_host
+
+    def is_unverifiable(self):
+        return self.unverifiable
+
+    def add_unredirected_header(self, key, val):
+        """Add a header that will not be added to a redirected request."""
+        self.unredirected_hdrs[string.capitalize(key)] = val
+
+    def has_header(self, header_name):
+        """True iff request has named header (regular or unredirected)."""
+        if (self.headers.has_key(header_name) or
+            self.unredirected_hdrs.has_key(header_name)):
+            return True
+        return False
+
+    def get_header(self, header_name, default=None):
+        return self.headers.get(
+            header_name,
+            self.unredirected_hdrs.get(header_name, default))
+
+    def header_items(self):
+        hdrs = self.unredirected_hdrs.copy()
+        hdrs.update(self.headers)
+        return hdrs.items()
+
+    def __str__(self):
+        return "<Request for %s>" % self.get_full_url()
+
+    def get_method(self):
+        if self.has_data():
+            return "POST"
+        else:
+            return "GET"


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Request.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Util.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Util.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Util.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,544 @@
+"""Python backwards-compat., date/time routines, seekable file object wrapper.
+
+ Copyright 2002-2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+import re, string, time
+from types import TupleType
+from StringIO import StringIO
+
+try:
+    from exceptions import StopIteration
+except ImportError:
+    from ClientCookie._ClientCookie import StopIteration
+
+def startswith(string, initial):
+    if len(initial) > len(string): return False
+    return string[:len(initial)] == initial
+
+def endswith(string, final):
+    if len(final) > len(string): return False
+    return string[-len(final):] == final
+
+def compat_issubclass(obj, tuple_or_class):
+    # for 2.1 and below
+    if type(tuple_or_class) == TupleType:
+        for klass in tuple_or_class:
+            if issubclass(obj, klass):
+                return True
+        return False
+    return issubclass(obj, tuple_or_class)
+
+def compat_isinstance(obj, tuple_or_class):
+    # for 2.1 and below
+    if type(tuple_or_class) == TupleType:
+        for klass in tuple_or_class:
+            if isinstance(obj, klass):
+                return True
+        return False
+    return isinstance(obj, tuple_or_class)
+
+def isstringlike(x):
+    try: x+""
+    except: return False
+    else: return True
+
+SPACE_DICT = {}
+for c in string.whitespace:
+    SPACE_DICT[c] = None
+del c
+def isspace(string):
+    for c in string:
+        if not SPACE_DICT.has_key(c): return False
+    return True
+
+# this is here rather than in _HeadersUtil as it's just for
+# compatibility with old Python versions, rather than entirely new code
+def getheaders(msg, name):
+    """Get all values for a header.
+
+    This returns a list of values for headers given more than once; each
+    value in the result list is stripped in the same way as the result of
+    getheader().  If the header is not given, return an empty list.
+    """
+    result = []
+    current = ''
+    have_header = 0
+    for s in msg.getallmatchingheaders(name):
+        if isspace(s[0]):
+            if current:
+                current = "%s\n %s" % (current, string.strip(s))
+            else:
+                current = string.strip(s)
+        else:
+            if have_header:
+                result.append(current)
+            current = string.strip(s[string.find(s, ":") + 1:])
+            have_header = 1
+    if have_header:
+        result.append(current)
+    return result
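+
+# For example, with two Set-Cookie headers in a response:
+#
+#     import mimetools
+#     msg = mimetools.Message(StringIO(
+#         "Set-Cookie: a=1\r\nSet-Cookie: b=2\r\n\r\n"))
+#     getheaders(msg, "Set-Cookie")   # -> ["a=1", "b=2"]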
+
+try:
+    from calendar import timegm
+    timegm((2045, 1, 1, 22, 23, 32))  # overflows in 2.1
+except:
+    # Number of days per month (except for February in leap years)
+    mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+    # Return 1 for leap years, 0 for non-leap years
+    def isleap(year):
+        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
+    # Return number of leap years in range [y1, y2)
+    # Assume y1 <= y2 and no funny (non-leap century) years
+    def leapdays(y1, y2):
+        return (y2+3)/4 - (y1+3)/4
+
+    EPOCH = 1970
+    def timegm(tuple):
+        """Unrelated but handy function to calculate Unix timestamp from GMT."""
+        year, month, day, hour, minute, second = tuple[:6]
+        assert year >= EPOCH
+        assert 1 <= month <= 12
+        days = 365*(year-EPOCH) + leapdays(EPOCH, year)
+        for i in range(1, month):
+            days = days + mdays[i]
+        if month > 2 and isleap(year):
+            days = days + 1
+        days = days + day - 1
+        hours = days*24 + hour
+        minutes = hours*60 + minute
+        seconds = minutes*60L + second
+        return seconds
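+
+    # Worked example for the fallback above:
+    #     timegm((1994, 2, 9, 22, 23, 32)) -> 760832612L
+    #     days    = 365*24 (years) + 6 (leap days)  = 8766
+    #             + 31 (January) + 9 - 1            = 8805
+    #     hours   = 8805*24 + 22                    = 211342
+    #     minutes = 211342*60 + 23                  = 12680543
+    #     seconds = 12680543*60L + 32               = 760832612L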
+
+
+# Date/time conversion routines for formats used by the HTTP protocol.
+
+EPOCH = 1970
+def my_timegm(tt):
+    year, month, mday, hour, min, sec = tt[:6]
+    if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
+        (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
+        return timegm(tt)
+    else:
+        return None
+
+days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
+          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+months_lower = []
+for month in months: months_lower.append(string.lower(month))
+
+
+def time2isoz(t=None):
+    """Return a string representing time in seconds since epoch, t.
+
+    If the function is called without an argument, it will use the current
+    time.
+
+    The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
+    representing Universal Time (UTC, aka GMT).  An example of this format is:
+
+    1994-11-24 08:49:37Z
+
+    """
+    if t is None: t = time.time()
+    year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
+    return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
+        year, mon, mday, hour, min, sec)
+
+def time2netscape(t=None):
+    """Return a string representing time in seconds since epoch, t.
+
+    If the function is called without an argument, it will use the current
+    time.
+
+    The format of the returned string is like this:
+
+    Wed, DD-Mon-YYYY HH:MM:SS GMT
+
+    """
+    if t is None: t = time.time()
+    year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
+    return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
+        days[wday], mday, months[mon-1], year, hour, min, sec)
+
+
+UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
+
+timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
+def offset_from_tz_string(tz):
+    offset = None
+    if UTC_ZONES.has_key(tz):
+        offset = 0
+    else:
+        m = timezone_re.search(tz)
+        if m:
+            offset = 3600 * int(m.group(2))
+            if m.group(3):
+                offset = offset + 60 * int(m.group(3))
+            if m.group(1) == '-':
+                offset = -offset
+    return offset
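+
+# For example:
+#     offset_from_tz_string("GMT")     ->  0
+#     offset_from_tz_string("-0800")   ->  -28800  (8 hours west of UTC)
+#     offset_from_tz_string("+01:30")  ->  5400
+#     offset_from_tz_string("EST")     ->  None    (unknown zone name)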
+
+def _str2time(day, mon, yr, hr, min, sec, tz):
+    # translate month name to number
+    # month numbers start with 1 (January)
+    try:
+        mon = months_lower.index(string.lower(mon))+1
+    except ValueError:
+        # maybe it's already a number
+        try:
+            imon = int(mon)
+        except ValueError:
+            return None
+        if 1 <= imon <= 12:
+            mon = imon
+        else:
+            return None
+
+    # make sure clock elements are defined
+    if hr is None: hr = 0
+    if min is None: min = 0
+    if sec is None: sec = 0
+
+    yr = int(yr)
+    day = int(day)
+    hr = int(hr)
+    min = int(min)
+    sec = int(sec)
+
+    if yr < 1000:
+	# find "obvious" year
+	cur_yr = time.localtime(time.time())[0]
+	m = cur_yr % 100
+	tmp = yr
+	yr = yr + cur_yr - m
+	m = m - tmp
+        if abs(m) > 50:
+            if m > 0: yr = yr + 100
+            else: yr = yr - 100
+
+    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
+    t = my_timegm((yr, mon, day, hr, min, sec, tz))
+
+    if t is not None:
+        # adjust time using timezone string, to get absolute time since epoch
+        if tz is None:
+            tz = "UTC"
+        tz = string.upper(tz)
+        offset = offset_from_tz_string(tz)
+        if offset is None:
+            return None
+        t = t - offset
+
+    return t
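+
+# Worked example of the two-digit-year adjustment above, assuming the
+# current year is 2005 (so m = 5):
+#     yr=94: yr = 94 + 2005 - 5 = 2094; m = 5 - 94 = -89;
+#            abs(m) > 50 and m < 0, so yr = 2094 - 100 = 1994
+#     yr=3:  yr = 3 + 2005 - 5 = 2003; m = 5 - 3 = 2, so 2003 stands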
+
+
+strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) (\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
+wkday_re = re.compile(
+    r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
+loose_http_re = re.compile(
+    r"""^
+    (\d\d?)            # day
+       (?:\s+|[-\/])
+    (\w+)              # month
+        (?:\s+|[-\/])
+    (\d+)              # year
+    (?:
+       (?:\s+|:)       # separator before clock
+       (\d\d?):(\d\d)  # hour:min
+       (?::(\d\d))?    # optional seconds
+    )?                 # optional clock
+       \s*
+    ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
+       \s*
+    (?:\(\w+\))?       # ASCII representation of timezone in parens.
+       \s*$""", re.X)
+def http2time(text):
+    """Returns time in seconds since epoch of time represented by a string.
+
+    Return value is an integer.
+
+    None is returned if the format of text is unrecognized, the time is outside
+    the representable range, or the timezone string is not recognized.  If the
+    string contains no timezone, UTC is assumed.
+
+    The timezone in the string may be numerical (like "-0800" or "+0100") or a
+    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
+    timezone strings equivalent to UTC (zero offset) are known to the function.
+
+    The function loosely parses the following formats:
+
+    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
+    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
+    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
+    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
+    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
+    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)
+
+    The parser ignores leading and trailing whitespace.  The time may be
+    absent.
+
+    If the year is given with only 2 digits, the function will select the
+    century that makes the year closest to the current date.
+
+    """
+    # fast exit for strictly conforming string
+    m = strict_re.search(text)
+    if m:
+        g = m.groups()
+        mon = months_lower.index(string.lower(g[1])) + 1
+        tt = (int(g[2]), mon, int(g[0]),
+              int(g[3]), int(g[4]), float(g[5]))
+        return my_timegm(tt)
+
+    # No, we need some messy parsing...
+
+    # clean up
+    text = string.lstrip(text)
+    text = wkday_re.sub("", text, 1)  # Useless weekday
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = loose_http_re.search(text)
+    if m is not None:
+        day, mon, yr, hr, min, sec, tz = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
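+
+# For example:
+#     http2time("Wed, 09 Feb 1994 22:23:32 GMT")  -> 760832612
+#     http2time("not a date")                     -> None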
+
+
+iso_re = re.compile(
+    """^
+    (\d{4})              # year
+       [-\/]?
+    (\d\d?)              # numerical month
+       [-\/]?
+    (\d\d?)              # day
+   (?:
+         (?:\s+|[-:Tt])  # separator before clock
+      (\d\d?):?(\d\d)    # hour:min
+      (?::?(\d\d(?:\.\d*)?))?  # optional seconds (and fractional)
+   )?                    # optional clock
+      \s*
+   ([-+]?\d\d?:?(:?\d\d)?
+    |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
+      \s*$""", re.X)
+def iso2time(text):
+    """
+    As for http2time, but parses the ISO 8601 formats:
+
+    1994-02-03 14:15:29 -0100    -- ISO 8601 format
+    1994-02-03 14:15:29          -- zone is optional
+    1994-02-03                   -- only date
+    1994-02-03T14:15:29          -- Use T as separator
+    19940203T141529Z             -- ISO 8601 compact format
+    19940203                     -- only date
+
+    """
+    # clean up
+    text = string.lstrip(text)
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = iso_re.search(text)
+    if m is not None:
+        # XXX there's an extra bit of the timezone I'm ignoring here: is
+        #   this the right thing to do?
+        yr, mon, day, hr, min, sec, tz, _ = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
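+
+# For example:
+#     iso2time("19940203T141529Z")           -> 760284929
+#     iso2time("1994-02-03 14:15:29 -0100")  -> 760288529  (one hour later)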
+
+
+
+# XXX Andrew Dalke kindly sent me a similar class in response to my request on
+# comp.lang.python, which I then proceeded to lose.  I wrote this class
+# instead, but I think he's released his code publicly since, could pinch the
+# tests from it, at least...
+class seek_wrapper:
+    """Adds a seek method to a file object.
+
+    This is only designed for seeking on readonly file-like objects.
+
+    Wrapped file-like object must have a read method.  The readline method is
+    only supported if that method is present on the wrapped object.  The
+    readlines method is always supported.  xreadlines and iteration are
+    supported only for Python 2.2 and above.
+
+    Public attribute: wrapped (the wrapped file object).
+
+    WARNING: All other attributes of the wrapped object (i.e. those that are not
+    one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
+    are passed through unaltered, which may or may not make sense for your
+    particular file object.
+
+    """
+    # General strategy is to check that cache is full enough, then delegate
+    # everything to the cache (self.__cache, which is a StringIO.StringIO
+    # instance).  Seems to be some cStringIO.StringIO problem on 1.5.2 -- I
+    # get a StringO object, with no readlines method.
+
+    # Invariant: the end of the cache is always at the same place as the
+    # end of the wrapped file:
+    # self.wrapped.tell() == self.__cache.tell()
+
+    def __init__(self, wrapped):
+        self.wrapped = wrapped
+        self.__have_readline = hasattr(self.wrapped, "readline")
+        self.__cache = StringIO()
+
+    def __getattr__(self, name):
+        wrapped = self.__dict__.get("wrapped")
+        if wrapped:
+            return getattr(wrapped, name)
+        return getattr(self.__class__, name)
+
+    def seek(self, offset, whence=0):
+        # make sure we have read all data up to the point we are seeking to
+        pos = self.__cache.tell()
+        if whence == 0:  # absolute
+            to_read = offset - pos
+        elif whence == 1:  # relative to current position
+            to_read = offset
+        elif whence == 2:  # relative to end of *wrapped* file
+            # since we don't know yet where the end of that file is, we must
+            # read everything
+            to_read = None
+        if to_read is None or to_read >= 0:
+            if to_read is None:
+                self.__cache.write(self.wrapped.read())
+            else:
+                self.__cache.write(self.wrapped.read(to_read))
+            self.__cache.seek(pos)
+
+        return self.__cache.seek(offset, whence)
+
+    def tell(self):
+        return self.__cache.tell()
+
+    def read(self, size=-1):
+        pos = self.__cache.tell()
+
+        self.__cache.seek(pos)
+
+        end = len(self.__cache.getvalue())
+        available = end - pos
+
+        # enough data already cached?
+        if size <= available and size != -1:
+            return self.__cache.read(size)
+
+        # no, so read sufficient data from wrapped file and cache it
+        to_read = size - available
+        assert to_read > 0 or size == -1
+        self.__cache.seek(0, 2)
+        if size == -1:
+            self.__cache.write(self.wrapped.read())
+        else:
+            self.__cache.write(self.wrapped.read(to_read))
+        self.__cache.seek(pos)
+
+        return self.__cache.read(size)
+
+    def readline(self, size=-1):
+        if not self.__have_readline:
+            raise NotImplementedError("no readline method on wrapped object")
+
+        # line we're about to read might not be complete in the cache, so
+        # read another line first
+        pos = self.__cache.tell()
+        self.__cache.seek(0, 2)
+        self.__cache.write(self.wrapped.readline())
+        self.__cache.seek(pos)
+
+        data = self.__cache.readline()
+        if size != -1:
+            r = data[:size]
+            self.__cache.seek(pos+size)
+        else:
+            r = data
+        return r
+
+    def readlines(self, sizehint=-1):
+        pos = self.__cache.tell()
+        self.__cache.seek(0, 2)
+        self.__cache.write(self.wrapped.read())
+        self.__cache.seek(pos)
+        try:
+            return self.__cache.readlines(sizehint)
+        except TypeError:  # 1.5.2 hack
+            return self.__cache.readlines()
+
+    def __iter__(self): return self
+    def next(self):
+        line = self.readline()
+        if line == "": raise StopIteration
+        return line
+
+    xreadlines = __iter__
+
+    def __repr__(self):
+        return ("<%s at %s whose wrapped object = %s>" %
+                (self.__class__.__name__, `id(self)`, `self.wrapped`))
+
+    def close(self):
+        self.__cache = None
+        self.read = None
+        self.readline = None
+        self.readlines = None
+        self.seek = None
+        if self.wrapped: self.wrapped.close()
+        self.wrapped = None
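+
+# A minimal usage sketch:
+#
+#     f = seek_wrapper(StringIO("line one\nline two\n"))
+#     f.readline()   # -> "line one\n"
+#     f.seek(0)      # rewinds via the cache; the wrapped object never seeks
+#     f.read()       # -> "line one\nline two\n"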
+
+class eoffile:
+    # file-like object that always claims to be at end-of-file
+    def read(self, size=-1): return ""
+    def readline(self, size=-1): return ""
+
+class response_seek_wrapper(seek_wrapper):
+    """Avoids unnecessarily clobbering methods on .close().
+
+    Also supports pickling.
+
+    Class name is for historical reasons.
+
+    """
+
+    def close(self):
+        self.wrapped.close()
+        self.wrapped = eoffile()
+
+    def __getstate__(self):
+        # There are three obvious options here:
+        # 1. truncate
+        # 2. read to end
+        # 3. close socket, pickle state including read position, then open
+        #    again on unpickle and use Range header
+
+        # 2 breaks pickle protocol, because one expects the original object
+        # to be left unscathed by pickling.  3 is too complicated and
+        # surprising (and too much work ;-) to happen in a sane __getstate__.
+        # So we do 1.
+
+        state = self.__dict__.copy()
+        state["wrapped"] = eoffile()
+        return state


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_Util.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/__init__.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/__init__.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/__init__.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,68 @@
+import sys
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+# If you hate the idea of turning bugs into warnings, do:
+# import ClientCookie; ClientCookie.USE_BARE_EXCEPT = False
+USE_BARE_EXCEPT = True
+WARNINGS_STREAM = sys.stdout
+
+# Import names so that they can be imported directly from the package, like
+# this:
+#from ClientCookie import <whatever>
+
+# These work like their equivalents from logging.  Use logging directly if
+# you have 2.3.
+from _Debug import getLogger, StreamHandler, NOTSET, INFO, DEBUG
+
+from _ClientCookie import VERSION, __doc__, \
+     Cookie, \
+     CookiePolicy, DefaultCookiePolicy, \
+     CookieJar, FileCookieJar, LoadError, request_host
+from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
+from _MozillaCookieJar import MozillaCookieJar
+from _MSIECookieJar import MSIECookieJar
+try:
+    import bsddb
+except ImportError:
+    pass
+else:
+    from _BSDDBCookieJar import BSDDBCookieJar, CreateBSDDBCookieJar
+#from _MSIEDBCookieJar import MSIEDBCookieJar
+from _ConnCache import ConnectionCache
+try:
+    from urllib2 import AbstractHTTPHandler
+except ImportError:
+    pass
+else:
+    from ClientCookie._urllib2_support import \
+         Request, \
+         OpenerDirector, build_opener, install_opener, urlopen, \
+         OpenerFactory, urlretrieve, BaseHandler
+    from ClientCookie._urllib2_support import \
+         HTTPHandler, HTTPRedirectHandler, \
+         HTTPRequestUpgradeProcessor, \
+         HTTPEquivProcessor, SeekableProcessor, HTTPCookieProcessor, \
+         HTTPRefererProcessor, \
+         HTTPRefreshProcessor, HTTPErrorProcessor, \
+         HTTPResponseDebugProcessor, HTTPRedirectDebugProcessor
+
+    try:
+        import robotparser
+    except ImportError:
+        pass
+    else:
+        from ClientCookie._urllib2_support import \
+             HTTPRobotRulesProcessor, RobotExclusionError
+        del robotparser
+
+    import httplib
+    if hasattr(httplib, 'HTTPS'):
+        from ClientCookie._urllib2_support import HTTPSHandler
+    del AbstractHTTPHandler, httplib
+from _Util import http2time
+str2time = http2time
+del http2time


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/__init__.py
___________________________________________________________________
Name: svn:eol-style
   + native

Added: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_urllib2_support.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_urllib2_support.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_urllib2_support.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -0,0 +1,732 @@
+"""Integration with Python standard library module urllib2.
+
+Also includes a redirection bugfix, support for parsing HTML HEAD blocks for
+the META HTTP-EQUIV tag contents, and following Refresh header redirects.
+
+Copyright 2002-2004 John J Lee <jjl at pobox.com>
+
+This code is free software; you can redistribute it and/or modify it under
+the terms of the BSD License (see the file COPYING included with the
+distribution).
+
+"""
+
+import copy, time, tempfile
+
+import ClientCookie
+from _ClientCookie import CookieJar, request_host
+from _Util import isstringlike, startswith, getheaders
+from _Debug import getLogger
+info = getLogger("ClientCookie").info
+
+try: True
+except NameError:
+    True = 1
+    False = 0
+
+
+CHUNK = 1024  # size of chunks fed to HTML HEAD parser, in bytes
+
+try:
+    from urllib2 import AbstractHTTPHandler
+except ImportError:
+    pass
+else:
+    import urlparse, urllib2, urllib, httplib
+    from urllib2 import URLError, HTTPError
+    import types, string, socket
+    from cStringIO import StringIO
+    try:
+        import threading
+        _threading = threading; del threading
+    except ImportError:
+        import dummy_threading
+        _threading = dummy_threading; del dummy_threading
+
+    from _Util import response_seek_wrapper
+    from _Request import Request
+
+
+    class BaseHandler(urllib2.BaseHandler):
+        handler_order = 500
+
+        def __cmp__(self, other):
+            if not hasattr(other, "handler_order"):
+                # Try to preserve the old behavior of having custom classes
+                # inserted after default ones (works only for custom user
+                # classes which are not aware of handler_order).
+                return 0
+            return cmp(self.handler_order, other.handler_order)
+
+
+    # This fixes a bug in urllib2 as of Python 2.1.3 and 2.2.2
+    #  (http://www.python.org/sf/549151)
+    # 2.2.3 is broken here (my fault!), 2.3 is fixed.
+    class HTTPRedirectHandler(BaseHandler):
+        # maximum number of redirections to any single URL
+        # this is needed because of the state that cookies introduce
+        max_repeats = 4
+        # maximum total number of redirections (regardless of URL) before
+        # assuming we're in a loop
+        max_redirections = 10
+
+        # Implementation notes:
+
+        # To avoid the server sending us into an infinite loop, the request
+        # object needs to track what URLs we have already seen.  Do this by
+        # adding a handler-specific attribute to the Request object.  The value
+        # of the dict is used to count the number of times the same URL has
+        # been visited.  This is needed because visiting the same URL twice
+        # does not necessarily imply a loop, thanks to state introduced by
+        # cookies.
+
+        # Always unhandled redirection codes:
+        # 300 Multiple Choices: should not handle this here.
+        # 304 Not Modified: no need to handle here: only of interest to caches
+        #     that do conditional GETs
+        # 305 Use Proxy: probably not worth dealing with here
+        # 306 Unused: what was this for in previous versions of the protocol?
+
+        def redirect_request(self, newurl, req, fp, code, msg, headers):
+            """Return a Request or None in response to a redirect.
+
+            This is called by the http_error_30x methods when a redirection
+            response is received.  If a redirection should take place, return a
+            new Request to allow http_error_30x to perform the redirect;
+            otherwise, return None to indicate that an HTTPError should be
+            raised.
+
+            """
+            if code in (301, 302, 303, "refresh") or \
+                   (code == 307 and not req.has_data()):
+                # Strictly (according to RFC 2616), 301 or 302 in response to
+                # a POST MUST NOT cause a redirection without confirmation
+                # from the user (of urllib2, in this case).  In practice,
+                # essentially all clients do redirect in this case, so we do
+                # the same.
+                return Request(newurl,
+                               headers=req.headers,
+                               origin_req_host=req.get_origin_req_host(),
+                               unverifiable=True)
+            else:
+                raise HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+        def http_error_302(self, req, fp, code, msg, headers):
+            # Some servers (incorrectly) return multiple Location headers
+            # (so probably same goes for URI).  Use first header.
+            if headers.has_key('location'):
+                newurl = getheaders(headers, 'location')[0]
+            elif headers.has_key('uri'):
+                newurl = getheaders(headers, 'uri')[0]
+            else:
+                return
+            newurl = urlparse.urljoin(req.get_full_url(), newurl)
+
+            # XXX Probably want to forget about the state of the current
+            # request, although that might interact poorly with other
+            # handlers that also use handler-specific request attributes
+            new = self.redirect_request(newurl, req, fp, code, msg, headers)
+            if new is None:
+                return
+
+            # loop detection
+            # .redirect_dict has a key url if url was previously visited.
+            if hasattr(req, 'redirect_dict'):
+                visited = new.redirect_dict = req.redirect_dict
+                if (visited.get(newurl, 0) >= self.max_repeats or
+                    len(visited) >= self.max_redirections):
+                    raise HTTPError(req.get_full_url(), code,
+                                    self.inf_msg + msg, headers, fp)
+            else:
+                visited = new.redirect_dict = req.redirect_dict = {}
+            visited[newurl] = visited.get(newurl, 0) + 1
+
+            # Don't close the fp until we are sure that we won't use it
+            # with HTTPError.  
+            fp.read()
+            fp.close()
+
+            return self.parent.open(new)
+
+        http_error_301 = http_error_303 = http_error_307 = http_error_302
+        http_error_refresh = http_error_302
+
+        inf_msg = "The HTTP server returned a redirect error that would " \
+                  "lead to an infinite loop.\n" \
+                  "The last 30x error message was:\n"
+
+
+    class HTTPRequestUpgradeProcessor(BaseHandler):
+        # upgrade urllib2.Request to this module's Request
+        # yuck!
+        handler_order = 0  # before anything else
+
+        def http_request(self, request):
+            if not hasattr(request, "add_unredirected_header"):
+                newrequest = Request(request._Request__original, request.data,
+                                     request.headers)
+                try: newrequest.origin_req_host = request.origin_req_host
+                except AttributeError: pass
+                try: newrequest.unverifiable = request.unverifiable
+                except AttributeError: pass
+                request = newrequest
+            return request
+
+        https_request = http_request
+
+    class HTTPEquivProcessor(BaseHandler):
+        """Append META HTTP-EQUIV headers to regular HTTP headers."""
+        def http_response(self, request, response):
+            if not hasattr(response, "seek"):
+                response = response_seek_wrapper(response)
+            # grab HTTP-EQUIV headers and add them to the true HTTP headers
+            headers = response.info()
+            for hdr, val in parse_head(response):
+                # rfc822.Message interprets this as appending, not clobbering
+                headers[hdr] = val
+            response.seek(0)
+            return response
+
+        https_response = http_response
+
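+    # A hedged usage sketch (placeholder URL): with this processor
+    # installed, a page whose head contains
+    #   <meta http-equiv="Refresh" content="5; url=/next">
+    # also exposes that value through the ordinary HTTP headers:
+    #
+    #   opener = build_opener(HTTPEquivProcessor)
+    #   response = opener.open("http://example.com/")
+    #   response.info().getheader("Refresh")  # -> '5; url=/next'
+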
+    # XXX ATM this only takes notice of http responses -- probably
+    #   should be independent of protocol scheme (http, ftp, etc.)
+    class SeekableProcessor(BaseHandler):
+        """Make responses seekable."""
+
+        def http_response(self, request, response):
+            if not hasattr(response, "seek"):
+                return response_seek_wrapper(response)
+            return response
+
+        https_response = http_response
+
+    class HTTPCookieProcessor(BaseHandler):
+        """Handle HTTP cookies.
+
+        Public attributes:
+
+        cookiejar: CookieJar instance
+
+        """
+        def __init__(self, cookiejar=None):
+            if cookiejar is None:
+                cookiejar = CookieJar()
+            self.cookiejar = cookiejar
+
+        def http_request(self, request):
+            self.cookiejar.add_cookie_header(request)
+            return request
+
+        def http_response(self, request, response):
+            self.cookiejar.extract_cookies(response, request)
+            return response
+
+        https_request = http_request
+        https_response = http_response
+
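+    # A minimal usage sketch (placeholder URL): give the processor an
+    # explicit jar so the cookies can be inspected afterwards:
+    #
+    #   jar = CookieJar()
+    #   opener = build_opener(HTTPCookieProcessor(jar))
+    #   opener.open("http://example.com/")
+    #   # `jar` now holds any cookies the server set; they are sent back
+    #   # automatically on later requests made through this opener.
+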
+    try:
+        import robotparser
+    except ImportError:
+        pass
+    else:
+        class RobotExclusionError(urllib2.HTTPError):
+            def __init__(self, request, *args):
+                urllib2.HTTPError.__init__(self, *args)
+                self.request = request
+
+        class HTTPRobotRulesProcessor(BaseHandler):
+            # before redirections and response debugging, after everything else
+            handler_order = 800
+
+            try:
+                from httplib import HTTPMessage
+            except ImportError:
+                from mimetools import Message
+                http_response_class = Message
+            else:
+                http_response_class = HTTPMessage
+
+            def __init__(self, rfp_class=robotparser.RobotFileParser):
+                self.rfp_class = rfp_class
+                self.rfp = None
+                self._host = None
+
+            def http_request(self, request):
+                host = request.get_host()
+                scheme = request.get_type()
+                if host != self._host:
+                    self.rfp = self.rfp_class()
+                    self.rfp.set_url(scheme+"://"+host+"/robots.txt")
+                    self.rfp.read()
+                    self._host = host
+
+                ua = request.get_header("User-agent", "")
+                if self.rfp.can_fetch(ua, request.get_full_url()):
+                    return request
+                else:
+                    msg = "request disallowed by robots.txt"
+                    raise RobotExclusionError(
+                        request,
+                        request.get_full_url(),
+                        403, msg,
+                        self.http_response_class(StringIO()), StringIO(msg))
+
+            https_request = http_request
+
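+        # A hedged sketch of the behaviour (placeholder URL): fetching a
+        # path disallowed by the site's robots.txt raises
+        # RobotExclusionError, a subclass of urllib2.HTTPError:
+        #
+        #   opener = build_opener(HTTPRobotRulesProcessor)
+        #   try:
+        #       opener.open("http://example.com/private/")
+        #   except RobotExclusionError, e:
+        #       print e.code  # 403
+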
+    class HTTPRefererProcessor(BaseHandler):
+        """Add Referer header to requests.
+
+        This only makes sense if you use each RefererProcessor for a single
+        chain of requests (so, for example, if you use a single
+        HTTPRefererProcessor to fetch a series of URLs extracted from a
+        single page, this will break).
+
+        """
+        def __init__(self):
+            self.referer = None
+
+        def http_request(self, request):
+            if ((self.referer is not None) and
+                not request.has_header("Referer")):
+                request.add_unredirected_header("Referer", self.referer)
+            return request
+
+        def http_response(self, request, response):
+            self.referer = response.geturl()
+            return response
+
+        https_request = http_request
+        https_response = http_response
+
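+    # An illustrative sketch of the single-chain constraint (placeholder
+    # URLs): the Referer sent with each request is simply the URL of
+    # whatever this opener fetched last:
+    #
+    #   opener = build_opener(HTTPRefererProcessor)
+    #   opener.open("http://example.com/a")  # no Referer sent
+    #   opener.open("http://example.com/b")  # Referer: http://example.com/a
+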
+    class HTTPResponseDebugProcessor(BaseHandler):
+        handler_order = 900  # before redirections, after everything else
+
+        def http_response(self, request, response):
+            if not hasattr(response, "seek"):
+                response = response_seek_wrapper(response)
+            info(response.read())
+            info("*****************************************************")
+            response.seek(0)
+            return response
+
+        https_response = http_response
+
+    class HTTPRedirectDebugProcessor(BaseHandler):
+        def http_request(self, request):
+            if hasattr(request, "redirect_dict"):
+                info("redirecting to %s", request.get_full_url())
+            return request
+
+    class HTTPRefreshProcessor(BaseHandler):
+        """Perform HTTP Refresh redirections.
+
+        Note that if a non-200 HTTP code has occurred (for example, a 30x
+        redirect), this processor will do nothing.
+
+        By default, only zero-time Refresh headers are redirected.  Use the
+        max_time attribute / constructor argument to allow Refresh with longer
+        pauses.  Use the honor_time attribute / constructor argument to control
+        whether the requested pause is honoured (with a time.sleep()) or
+        skipped in favour of immediate redirection.
+
+        Public attributes:
+
+        max_time: see above
+        honor_time: see above
+
+        """
+        handler_order = 1000
+
+        def __init__(self, max_time=0, honor_time=True):
+            self.max_time = max_time
+            self.honor_time = honor_time
+
+        def http_response(self, request, response):
+            code, msg, hdrs = response.code, response.msg, response.info()
+
+            if code == 200 and hdrs.has_key("refresh"):
+                refresh = getheaders(hdrs, "refresh")[0]
+                i = string.find(refresh, ";")
+                if i != -1:
+                    pause, newurl_spec = refresh[:i], refresh[i+1:]
+                    i = string.find(newurl_spec, "=")
+                    if i != -1:
+                        pause = int(pause)
+                        if (self.max_time is None) or (pause <= self.max_time):
+                            if pause != 0 and self.honor_time:
+                                time.sleep(pause)
+                            newurl = newurl_spec[i+1:]
+                            hdrs["location"] = newurl
+                            # hardcoded http is NOT a bug
+                            response = self.parent.error(
+                                "http", request, response,
+                                "refresh", msg, hdrs)
+
+            return response
+
+        https_response = http_response
+
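+    # A hedged usage sketch (placeholder URL): follow Refresh headers
+    # that request a pause of up to 10 seconds, but redirect immediately
+    # instead of sleeping:
+    #
+    #   opener = build_opener(HTTPRefreshProcessor(max_time=10,
+    #                                              honor_time=False))
+    #   opener.open("http://example.com/refreshing-page")
+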
+    class HTTPErrorProcessor(BaseHandler):
+        """Process HTTP error responses.
+
+        The purpose of this handler is to allow other response processors a
+        look-in by removing the call to parent.error() from
+        AbstractHTTPHandler.
+
+        For non-200 error codes, this just passes the job on to the
+        Handler.<proto>_error_<code> methods, via the OpenerDirector.error
+        method.  Eventually, urllib2.HTTPDefaultErrorHandler will raise an
+        HTTPError if no other handler handles the error.
+
+        """
+        handler_order = 1000  # after all other processors
+
+        def http_response(self, request, response):
+            code, msg, hdrs = response.code, response.msg, response.info()
+
+            if code != 200:
+                # hardcoded http is NOT a bug
+                response = self.parent.error(
+                    "http", request, response, code, msg, hdrs)
+
+            return response
+
+        https_response = http_response
+
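+    # For example (an illustrative sketch, placeholder URL): with this
+    # processor installed, a 404 ultimately raises HTTPError from
+    # urllib2.HTTPDefaultErrorHandler instead of being returned as a
+    # normal response:
+    #
+    #   try:
+    #       build_opener().open("http://example.com/missing")
+    #   except urllib2.HTTPError, e:
+    #       print e.code  # 404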
+
+    class AbstractHTTPHandler(BaseHandler):
+
+        def __init__(self, debuglevel=0):
+            self._debuglevel = debuglevel
+
+        def set_http_debuglevel(self, level):
+            self._debuglevel = level
+
+        def do_request_(self, request):
+            host = request.get_host()
+            if not host:
+                raise URLError('no host given')
+
+            if request.has_data():  # POST
+                data = request.get_data()
+                if not request.has_header('Content-type'):
+                    request.add_unredirected_header(
+                        'Content-type',
+                        'application/x-www-form-urlencoded')
+
+            scheme, sel = urllib.splittype(request.get_selector())
+            sel_host, sel_path = urllib.splithost(sel)
+            if not request.has_header('Host'):
+                request.add_unredirected_header('Host', sel_host or host)
+            for name, value in self.parent.addheaders:
+                name = string.capitalize(name)
+                if not request.has_header(name):
+                    request.add_unredirected_header(name, value)
+
+            return request
+
+        def do_open(self, http_class, req):
+            """Return an addinfourl object for the request, using http_class.
+
+            http_class must implement the HTTPConnection API from httplib.
+            The addinfourl return value is a file-like object.  It also
+            has methods and attributes including:
+                - info(): return a mimetools.Message object for the headers
+                - geturl(): return the original request URL
+                - code: HTTP status code
+            """
+            host = req.get_host()
+            if not host:
+                raise URLError('no host given')
+
+            h = http_class(host) # will parse host:port
+            h.set_debuglevel(self._debuglevel)
+
+            headers = req.headers.copy()
+            headers.update(req.unredirected_hdrs)
+            # We want to make an HTTP/1.1 request, but the addinfourl
+            # class isn't prepared to deal with a persistent connection.
+            # It will try to read all remaining data from the socket,
+            # which will block while the server waits for the next request.
+            # So make sure the connection gets closed after the (only)
+            # request.
+            headers["Connection"] = "close"
+            try:
+                h.request(req.get_method(), req.get_selector(), req.data, headers)
+                r = h.getresponse()
+            except socket.error, err: # XXX what error?
+                raise URLError(err)
+
+            # Pick apart the HTTPResponse object to get the addinfourl
+            # object initialized properly.
+
+            # Wrap the HTTPResponse object in socket's file object adapter
+            # for Windows.  That adapter calls recv(), so delegate recv()
+            # to read().  This weird wrapping allows the returned object to
+            # have readline() and readlines() methods.
+
+            # XXX It might be better to extract the read buffering code
+            # out of socket._fileobject() and into a base class.
+
+            r.recv = r.read
+            fp = socket._fileobject(r, 'rb', -1)
+
+            resp = urllib.addinfourl(fp, r.msg, req.get_full_url())
+            resp.code = r.status
+            resp.msg = r.reason
+            return resp
+
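+        # A brief sketch of the returned object (`handler` and `req` are
+        # placeholders):
+        #
+        #   resp = handler.do_open(httplib.HTTPConnection, req)
+        #   resp.code, resp.msg   # e.g. 200, 'OK'
+        #   resp.info()           # mimetools.Message with the headers
+        #   resp.geturl()         # the original request URL
+        #   body = resp.read()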
+
+    # XXX would self.reset() work, instead of raising this exception?
+    class EndOfHeadError(Exception): pass
+    class AbstractHeadParser:
+        # only these elements are allowed in or before HEAD of document
+        head_elems = ("html", "head",
+                      "title", "base",
+                      "script", "style", "meta", "link", "object")
+
+        def __init__(self):
+            self.http_equiv = []
+        def start_meta(self, attrs):
+            http_equiv = content = None
+            for key, value in attrs:
+                if key == "http-equiv":
+                    http_equiv = value
+                elif key == "content":
+                    content = value
+            if http_equiv is not None:
+                self.http_equiv.append((http_equiv, content))
+
+        def end_head(self):
+            raise EndOfHeadError()
+
+    # use HTMLParser if we have it (it does XHTML), htmllib otherwise
+    try:
+        import HTMLParser
+    except ImportError:
+        import htmllib, formatter
+        class HeadParser(AbstractHeadParser, htmllib.HTMLParser):
+            def __init__(self):
+                htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
+                AbstractHeadParser.__init__(self)
+
+            def handle_starttag(self, tag, method, attrs):
+                if tag in self.head_elems:
+                    method(attrs)
+                else:
+                    raise EndOfHeadError()
+
+            def handle_endtag(self, tag, method):
+                if tag in self.head_elems:
+                    method()
+                else:
+                    raise EndOfHeadError()
+
+        HEAD_PARSER_CLASS = HeadParser
+    else:
+        class XHTMLCompatibleHeadParser(AbstractHeadParser,
+                                        HTMLParser.HTMLParser):
+            def __init__(self):
+                HTMLParser.HTMLParser.__init__(self)
+                AbstractHeadParser.__init__(self)
+
+            def handle_starttag(self, tag, attrs):
+                if tag not in self.head_elems:
+                    raise EndOfHeadError()
+                try:
+                    method = getattr(self, 'start_' + tag)
+                except AttributeError:
+                    try:
+                        method = getattr(self, 'do_' + tag)
+                    except AttributeError:
+                        pass # unknown tag
+                    else:
+                        method(attrs)
+                else:
+                    method(attrs)
+
+            def handle_endtag(self, tag):
+                if tag not in self.head_elems:
+                    raise EndOfHeadError()
+                try:
+                    method = getattr(self, 'end_' + tag)
+                except AttributeError:
+                    pass # unknown tag
+                else:
+                    method()
+
+            # handle_charref, handle_entityref and default entitydefs are taken
+            # from sgmllib
+            def handle_charref(self, name):
+                try:
+                    n = int(name)
+                except ValueError:
+                    self.unknown_charref(name)
+                    return
+                if not 0 <= n <= 255:
+                    self.unknown_charref(name)
+                    return
+                self.handle_data(chr(n))
+
+            # Definition of entities -- derived classes may override
+            entitydefs = \
+                    {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
+
+            def handle_entityref(self, name):
+                table = self.entitydefs
+                if name in table:
+                    self.handle_data(table[name])
+                else:
+                    self.unknown_entityref(name)
+                    return
+
+            def unknown_entityref(self, ref):
+                self.handle_data("&%s;" % ref)
+
+            def unknown_charref(self, ref):
+                self.handle_data("&#%s;" % ref)
+
+        HEAD_PARSER_CLASS = XHTMLCompatibleHeadParser
+
+    def parse_head(fileobj):
+        """Return a list of key, value pairs."""
+        hp = HEAD_PARSER_CLASS()
+        while 1:
+            data = fileobj.read(CHUNK)
+            try:
+                hp.feed(data)
+            except EndOfHeadError:
+                break
+            if len(data) != CHUNK:
+                # this should only happen if there is no HTML body, or if
+                # CHUNK is big
+                break
+        return hp.http_equiv
+
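+    # A small sketch of what parse_head returns (StringIO stands in for
+    # a response object):
+    #
+    #   from StringIO import StringIO
+    #   html = ('<html><head><meta http-equiv="refresh" '
+    #           'content="5; url=/next"></head><body></body></html>')
+    #   parse_head(StringIO(html))
+    #   # -> [('refresh', '5; url=/next')]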
+
+    class HTTPHandler(AbstractHTTPHandler):
+        def http_open(self, req):
+            return self.do_open(httplib.HTTPConnection, req)
+
+        http_request = AbstractHTTPHandler.do_request_
+
+    if hasattr(httplib, 'HTTPS'):
+        class HTTPSHandler(AbstractHTTPHandler):
+            def https_open(self, req):
+                return self.do_open(httplib.HTTPSConnection, req)
+
+            https_request = AbstractHTTPHandler.do_request_
+
+##     class HTTPHandler(AbstractHTTPHandler):
+##         def http_open(self, req):
+##             return self.do_open(httplib.HTTP, req)
+
+##         http_request = AbstractHTTPHandler.do_request_
+
+##     if hasattr(httplib, 'HTTPS'):
+##         class HTTPSHandler(AbstractHTTPHandler):
+##             def https_open(self, req):
+##                 return self.do_open(httplib.HTTPS, req)
+
+##             https_request = AbstractHTTPHandler.do_request_
+
+    if int(10*float(urllib2.__version__[:3])) >= 24:
+        # urllib2 supports processors already
+        from _Opener import OpenerMixin
+        class OpenerDirector(urllib2.OpenerDirector, OpenerMixin):
+            pass
+    else:
+        from _Opener import OpenerDirector
+
+    class OpenerFactory:
+        """This class's interface is quite likely to change."""
+
+        default_classes = [
+            # handlers
+            urllib2.ProxyHandler,
+            urllib2.UnknownHandler,
+            HTTPHandler,  # from this module (derived from new AbstractHTTPHandler)
+            urllib2.HTTPDefaultErrorHandler,
+            HTTPRedirectHandler,  # from this module (bugfixed)
+            urllib2.FTPHandler,
+            urllib2.FileHandler,
+            # processors
+            HTTPRequestUpgradeProcessor,
+            #HTTPEquivProcessor,
+            #SeekableProcessor,
+            HTTPCookieProcessor,
+            #HTTPRefererProcessor,
+            #HTTPRefreshProcessor,
+            HTTPErrorProcessor
+            ]
+        handlers = []
+        replacement_handlers = []
+
+        def __init__(self, klass=OpenerDirector):
+            self.klass = klass
+
+        def build_opener(self, *handlers):
+            """Create an opener object from a list of handlers and processors.
+
+            The opener will use several default handlers and processors, including
+            support for HTTP and FTP.
+
+            If any of the handlers passed as arguments are subclasses of the
+            default handlers, the default handlers will not be used.
+
+            """
+            opener = self.klass()
+            default_classes = list(self.default_classes)
+            if hasattr(httplib, 'HTTPS'):
+                default_classes.append(HTTPSHandler)
+            skip = []
+            for klass in default_classes:
+                for check in handlers:
+                    if type(check) == types.ClassType:
+                        if issubclass(check, klass):
+                            skip.append(klass)
+                    elif type(check) == types.InstanceType:
+                        if isinstance(check, klass):
+                            skip.append(klass)
+            for klass in skip:
+                default_classes.remove(klass)
+
+            for klass in default_classes:
+                opener.add_handler(klass())
+            for h in handlers:
+                if type(h) == types.ClassType:
+                    h = h()
+                opener.add_handler(h)
+
+            return opener
+
+    build_opener = OpenerFactory().build_opener
+
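+    # A hedged sketch of handler replacement: passing a subclass (or an
+    # instance of one) of a default handler suppresses that default.
+    # Here a hypothetical VerboseRedirectHandler stands in for this
+    # module's HTTPRedirectHandler:
+    #
+    #   class VerboseRedirectHandler(HTTPRedirectHandler):
+    #       def redirect_request(self, newurl, req, fp, code, msg, headers):
+    #           print "redirecting to", newurl
+    #           return HTTPRedirectHandler.redirect_request(
+    #               self, newurl, req, fp, code, msg, headers)
+    #
+    #   opener = build_opener(VerboseRedirectHandler)
+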
+    _opener = None
+    urlopen_lock = _threading.Lock()
+    def urlopen(url, data=None):
+        global _opener
+        if _opener is None:
+            urlopen_lock.acquire()
+            try:
+                if _opener is None:
+                    _opener = build_opener()
+            finally:
+                urlopen_lock.release()
+        return _opener.open(url, data)
+
+    def urlretrieve(url, filename=None, reporthook=None, data=None):
+        global _opener
+        if _opener is None:
+            urlopen_lock.acquire()
+            try:
+                if _opener is None:
+                    _opener = build_opener()
+            finally:
+                urlopen_lock.release()
+        return _opener.retrieve(url, filename, reporthook, data)
+
+    def install_opener(opener):
+        global _opener
+        _opener = opener
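+
+    # A minimal end-to-end sketch (placeholder URL): build an opener
+    # with extra processors, install it, and use the module-level
+    # urlopen():
+    #
+    #   opener = build_opener(HTTPRefererProcessor, HTTPEquivProcessor)
+    #   install_opener(opener)
+    #   data = urlopen("http://example.com/").read()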


Property changes on: Zope3/branches/testbrowser-integration/src/zope/testbrowser/ClientCookie/_urllib2_support.py
___________________________________________________________________
Name: svn:eol-style
   + native

Modified: Zope3/branches/testbrowser-integration/src/zope/testbrowser/__init__.py
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/__init__.py	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/__init__.py	2005-10-06 20:23:16 UTC (rev 38827)
@@ -32,6 +32,14 @@
 import ClientForm as x
 assert x is ClientForm
 
+# stitch in ClientCookie
+from zope.testbrowser import ClientCookie
+
+# I don't really care which version is there, since we are not patching the
+# code.
+if 'ClientCookie' not in sys.modules:
+    sys.modules['ClientCookie'] = ClientCookie
+
 # stitch in pullparser
 from zope.testbrowser import pullparser
 

Modified: Zope3/branches/testbrowser-integration/src/zope/testbrowser/over_the_wire.txt
===================================================================
--- Zope3/branches/testbrowser-integration/src/zope/testbrowser/over_the_wire.txt	2005-10-06 20:22:37 UTC (rev 38826)
+++ Zope3/branches/testbrowser-integration/src/zope/testbrowser/over_the_wire.txt	2005-10-06 20:23:16 UTC (rev 38827)
@@ -16,7 +16,9 @@
 
 The browser can `open` web pages:
 
-    >>> browser.open('http://google.com')
+    # This is tricky, since in Germany I am usually forwarded to google.de;
+    # the `ncr` suffix forces Google to really serve google.com.
+    >>> browser.open('http://google.com/ncr')
     >>> browser.url
     'http://www.google.com/'
 


