]> review.fuel-infra Code Review - packages/trusty/python-eventlet.git/commitdiff
Update package python-eventlet for MOS 7.0 08/8508/11
authorMikhail Ivanov <mivanov@mirantis.com>
Fri, 26 Jun 2015 10:13:17 +0000 (13:13 +0300)
committerMikhail Ivanov <mivanov@mirantis.com>
Fri, 26 Jun 2015 12:01:04 +0000 (15:01 +0300)
Update package for MOS 7.0
Update the subrevision, because a wrong package with a similar version exists in our repo
Sources from https://pypi.python.org/pypi/eventlet/0.16.1
Closes-Bug: #1469099

Change-Id: I71d6c76a9e471c770c731da920aeaef8afe0030b

101 files changed:
debian/changelog
debian/docs
debian/patches/use-packaged-python-mock-rather-than-embedded.patch
eventlet/AUTHORS
eventlet/MANIFEST.in
eventlet/NEWS
eventlet/PKG-INFO
eventlet/README.rst
eventlet/README.twisted [deleted file]
eventlet/doc/index.rst
eventlet/doc/modules/websocket.rst
eventlet/doc/modules/wsgi.rst
eventlet/doc/ssl.rst
eventlet/doc/testing.rst
eventlet/eventlet.egg-info/PKG-INFO
eventlet/eventlet.egg-info/SOURCES.txt
eventlet/eventlet/__init__.py
eventlet/eventlet/api.py [deleted file]
eventlet/eventlet/backdoor.py
eventlet/eventlet/convenience.py
eventlet/eventlet/coros.py
eventlet/eventlet/db_pool.py
eventlet/eventlet/green/_socket_nodns.py
eventlet/eventlet/green/builtin.py
eventlet/eventlet/green/os.py
eventlet/eventlet/green/profile.py
eventlet/eventlet/green/select.py
eventlet/eventlet/green/socket.py
eventlet/eventlet/green/ssl.py
eventlet/eventlet/green/subprocess.py
eventlet/eventlet/green/thread.py
eventlet/eventlet/green/threading.py
eventlet/eventlet/green/zmq.py
eventlet/eventlet/greenio.py
eventlet/eventlet/greenthread.py
eventlet/eventlet/hubs/__init__.py
eventlet/eventlet/hubs/hub.py
eventlet/eventlet/hubs/pyevent.py
eventlet/eventlet/hubs/twistedr.py [deleted file]
eventlet/eventlet/patcher.py
eventlet/eventlet/pool.py [deleted file]
eventlet/eventlet/proc.py [deleted file]
eventlet/eventlet/processes.py [deleted file]
eventlet/eventlet/queue.py
eventlet/eventlet/support/__init__.py
eventlet/eventlet/support/greendns.py
eventlet/eventlet/support/greenlets.py
eventlet/eventlet/support/six.py
eventlet/eventlet/timeout.py
eventlet/eventlet/tpool.py
eventlet/eventlet/twistedutil/__init__.py [deleted file]
eventlet/eventlet/twistedutil/join_reactor.py [deleted file]
eventlet/eventlet/twistedutil/protocol.py [deleted file]
eventlet/eventlet/twistedutil/protocols/__init__.py [deleted file]
eventlet/eventlet/twistedutil/protocols/basic.py [deleted file]
eventlet/eventlet/util.py [deleted file]
eventlet/eventlet/websocket.py
eventlet/eventlet/wsgi.py
eventlet/examples/twisted/twisted_client.py [deleted file]
eventlet/examples/twisted/twisted_http_proxy.py [deleted file]
eventlet/examples/twisted/twisted_portforward.py [deleted file]
eventlet/examples/twisted/twisted_server.py [deleted file]
eventlet/examples/twisted/twisted_srvconnector.py [deleted file]
eventlet/examples/twisted/twisted_xcap_proxy.py [deleted file]
eventlet/tests/__init__.py
eventlet/tests/api_test.py
eventlet/tests/backdoor_test.py
eventlet/tests/convenience_test.py
eventlet/tests/db_pool_test.py
eventlet/tests/debug_test.py
eventlet/tests/env_test.py
eventlet/tests/fork_test.py
eventlet/tests/greenio_test.py
eventlet/tests/hub_test.py
eventlet/tests/manual/greenio_memtest.py
eventlet/tests/mock.py
eventlet/tests/mysqldb_test.py
eventlet/tests/mysqldb_test_monkey_patch.py
eventlet/tests/parse_results.py
eventlet/tests/patcher_test.py
eventlet/tests/patcher_test_importlib_lock.py [new file with mode: 0644]
eventlet/tests/processes_test.py [deleted file]
eventlet/tests/queue_test.py
eventlet/tests/socket_test.py [new file with mode: 0644]
eventlet/tests/ssl_test.py
eventlet/tests/stdlib/all.py
eventlet/tests/stdlib/test_thread__boundedsem.py
eventlet/tests/stdlib/test_urllib2.py
eventlet/tests/subprocess_test.py
eventlet/tests/test__coros_queue.py [deleted file]
eventlet/tests/test__event.py
eventlet/tests/test__pool.py [deleted file]
eventlet/tests/test__proc.py [deleted file]
eventlet/tests/test__socket_errors.py
eventlet/tests/test__twistedutil.py [deleted file]
eventlet/tests/test__twistedutil_protocol.py [deleted file]
eventlet/tests/websocket_new_test.py
eventlet/tests/websocket_test.py
eventlet/tests/wsgi_test.py
eventlet/tests/wsgi_test_conntimeout.py
tests/runtests.sh [new file with mode: 0644]

index 4623f70689258ce2634179534441d37a547fbd66..2c24f5524c42618cb2ed9994298ea22023203ef6 100644 (file)
@@ -1,3 +1,13 @@
+python-eventlet (0.16.1-1~u14.04+mos2) mos7.0; urgency=medium
+
+  * Update package version for MOS7.0
+  * Also, update subrevision, because wrong package in our repo has a
+    similar version
+  * Sources from https://pypi.python.org/pypi/eventlet/0.16.1
+  * LP #1469099
+
+ -- Mikhail Ivanov <mivanov@mirantis.com> Fri, 26 Jun 2015 13:19:34 +0300
+
 python-eventlet (0.15.2-1~u14.04+mos1) mos6.1; urgency=medium
 
   * Adjust the package revision according to the versioning policy
index 573a6ac98f3677a28e4990b8c4f749c6b38ee66f..a1320b1b4ab22e9be9bbc9d5777c0e31aa4011d5 100644 (file)
@@ -1,2 +1 @@
 README.rst
-README.twisted
index 423c8930b2227a5292929b80c8c125fa5733cd92..12c6d10ac53dfd4fa41a9a25bf573e7a7dba7cdc 100644 (file)
@@ -18,13 +18,13 @@ Last-Update: 2014-09-07
  from eventlet.support import six
 --- python-eventlet-0.15.2.orig/tests/websocket_test.py
 +++ python-eventlet-0.15.2/tests/websocket_test.py
-@@ -9,7 +9,8 @@ from eventlet.green import httplib
- from eventlet.green import urllib2
+@@ -8,7 +8,8 @@
+ from eventlet.support import six
  from eventlet.websocket import WebSocket, WebSocketWSGI
--from tests import mock, LimitedTestCase, certificate_file, private_key_file
+
+-from tests import certificate_file, LimitedTestCase, mock, private_key_file
 +import mock
 +from tests import LimitedTestCase, certificate_file, private_key_file
  from tests import skip_if_no_ssl
  from tests.wsgi_test import _TestBase
+
index c5437d0dac22588bdb774d4feac67ee7781765d5..df79f4874c720d986800465bd5c381561248ccc1 100644 (file)
@@ -99,3 +99,15 @@ Thanks To
 * Jan Grant, Michael Kerrin, second simultaneous read (Github #94)
 * Simon Jagoe, Python3 octal literal fix
 * Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses
+* raylu, fixing operator precedence bug in eventlet.wsgi
+* Christoph Gysin, PEP 8 conformance
+* Andrey Gubarev
+* Corey Wright
+* Deva
+* Johannes Erdfelt
+* Kevin
+* QthCN
+* Steven Hardy
+* Stuart McLaren
+* Tomaz Muraus
+* Victor Stinner
index 196ff1fa368bf90904a016645157d1cdb9ee04c6..d554d075c0702b783a8ef5f55ee12717664da9c6 100644 (file)
@@ -1,4 +1,4 @@
 recursive-include tests *.py *.crt *.key
 recursive-include doc *.rst *.txt *.py Makefile *.png
 recursive-include examples *.py *.html
-include MANIFEST.in README.twisted NEWS AUTHORS LICENSE README.rst
+include MANIFEST.in NEWS AUTHORS LICENSE README.rst
index 1ff4aaae5dbd9b9a9a3945fcca281d89ceb4b465..47767c315d01b6c5923d9de8ac073d59f8f51aa0 100644 (file)
@@ -1,12 +1,33 @@
+0.16.1
+======
+
+* Wheel build 0.16.0 incorrectly shipped removed module eventlet.util.
+
+0.16.0
+======
+
+* Fix SSL socket wrapping and Python 2.7.9 compatibility; Thanks to Jakub Stasiak
+* Fix monkey_patch() on Python 3; Thanks to Victor Stinner
+* Fix "maximum recursion depth exceeded in GreenSocket.__del__"; Thanks to Jakub Stasiak
+* db_pool: BaseConnectionPool.clear updates .current_size #139; Thanks to Andrey Gubarev
+* Fix __str__ method on the TimeoutExpired exception class.; Thanks to Tomaz Muraus
+* hubs: drop Twisted support
+* Removed deprecated modules: api, most of coros, pool, proc, processes and util
+* Improved Python 3 compatibility (including patch by raylu); Thanks to Jakub Stasiak
+* Allow more graceful shutdown of wsgi server; Thanks to Stuart McLaren
+* wsgi.input: Make send_hundred_continue_headers() a public API; Thanks to Tushar Gohad
+* tpool: Windows compatibility, fix ResourceWarning. Thanks to Victor Stinner
+* tests: Fix timers not cleaned up on MySQL test skips; Thanks to Corey Wright
+
 0.15.2
 ======
-greenio: fixed memory leak, introduced in 0.15.1; Thanks to Michael Kerrin, Tushar Gohad
-wsgi: Support optional headers w/ "100 Continue" responses; Thanks to Tushar Gohad
+greenio: fixed memory leak, introduced in 0.15.1; Thanks to Michael Kerrin, Tushar Gohad
+wsgi: Support optional headers w/ "100 Continue" responses; Thanks to Tushar Gohad
 
 0.15.1
 ======
-greenio: Fix second simultaneous read (parallel paramiko issue); Thanks to Jan Grant, Michael Kerrin
-db_pool: customizable connection cleanup function; Thanks to Avery Fay
+greenio: Fix second simultaneous read (parallel paramiko issue); Thanks to Jan Grant, Michael Kerrin
+db_pool: customizable connection cleanup function; Thanks to Avery Fay
 
 0.15
 ====
index dc29367f1701d6ffc62195857873cdd418bb8de8..aff4111ae91213039722b56d7f33ff2eda6ccbb1 100644 (file)
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: eventlet
-Version: 0.15.2
+Version: 0.16.1
 Summary: Highly concurrent networking library
 Home-page: http://eventlet.net
 Author: Linden Lab
@@ -10,8 +10,8 @@ Description: Eventlet is a concurrent networking library for Python that allows
         
         It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
         
-        It's easy to get started using Eventlet, and easy to convert existing 
-        applications to use it.  Start off by looking at the `examples`_, 
+        It's easy to get started using Eventlet, and easy to convert existing
+        applications to use it.  Start off by looking at the `examples`_,
         `common design patterns`_, and the list of `basic API primitives`_.
         
         .. _examples: http://eventlet.net/doc/examples.html
@@ -25,7 +25,7 @@ Description: Eventlet is a concurrent networking library for Python that allows
         Here's something you can try right on the command line::
         
             % python
-            >>> import eventlet 
+            >>> import eventlet
             >>> from eventlet.green import urllib2
             >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
             >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
@@ -57,6 +57,20 @@ Description: Eventlet is a concurrent networking library for Python that allows
         
         The built html files can be found in doc/_build/html afterward.
         
+        
+        Twisted
+        =======
+        
+        Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time,
+        now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration.
+        
+        If you have a project that uses Eventlet with Twisted, your options are:
+        
+        * use last working release eventlet==0.14
+        * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
+        `EVENTLET_HUB` environment variable can point to external modules.
+        * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
+        
 Platform: UNKNOWN
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python
index 9cf52d1d58a75cc1e5316b99be15aadc70734e0c..ce554ed620d15815197461e7ddf1fe618e5df13d 100644 (file)
@@ -2,8 +2,8 @@ Eventlet is a concurrent networking library for Python that allows you to change
 
 It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
 
-It's easy to get started using Eventlet, and easy to convert existing 
-applications to use it.  Start off by looking at the `examples`_, 
+It's easy to get started using Eventlet, and easy to convert existing
+applications to use it.  Start off by looking at the `examples`_,
 `common design patterns`_, and the list of `basic API primitives`_.
 
 .. _examples: http://eventlet.net/doc/examples.html
@@ -17,7 +17,7 @@ Quick Example
 Here's something you can try right on the command line::
 
     % python
-    >>> import eventlet 
+    >>> import eventlet
     >>> from eventlet.green import urllib2
     >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
     >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
@@ -48,3 +48,17 @@ To build a complete set of HTML documentation, you must have Sphinx, which can b
   make html
 
 The built html files can be found in doc/_build/html afterward.
+
+
+Twisted
+=======
+
+Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time,
+now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration.
+
+If you have a project that uses Eventlet with Twisted, your options are:
+
+* use last working release eventlet==0.14
+* start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
+`EVENTLET_HUB` environment variable can point to external modules.
+* fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
diff --git a/eventlet/README.twisted b/eventlet/README.twisted
deleted file mode 100644 (file)
index 7669693..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
---work in progress--
-
-Introduction
-------------
-Twisted provides solid foundation for asynchronous programming in Python.
-Eventlet makes asynchronous programming look like synchronous, thus
-achieving higher signal-to-noise ratio than traditional twisted programs have.
-
-Eventlet on top of twisted provides:
- * stable twisted
- * usable and readable synchronous style
- * existing twisted code can be used without any changes
- * existing blocking code can be used after trivial changes applied
-
-NOTE: the maintainer of Eventlet's Twisted support no longer supports it; it still exists but may have had some breakage along the way.  Please treat it as experimental, and if you'd like to maintain it, please do!
-
-Eventlet features:
-
- * utilities for spawning and controlling greenlet execution:
-   api.spawn, api.kill, proc module
- * utilities for communicating between greenlets:
-   event.Event, queue.Queue, semaphore.Semaphore
- * standard Python modules that won't block the reactor:
-   eventlet.green package
- * utilities specific to twisted hub:
-   eventlet.twistedutil package
-
-
-Getting started with eventlet on twisted
-----------------------------------------
-
-This section will only mention stuff that may be useful but it
-won't explain in details how to use it. For that, refer to the
-docstrings of the modules and the examples.
-
-There are 2 ways of using twisted with eventlet, one that is
-familiar to twisted developers and another that is familiar
-to eventlet developers:
-
- 1. explicitly start the main loop in the main greenlet;
- 2. implicitly start the main loop in a dedicated greenlet.
-
-To enable (1), add this line at the top of your program:
-from eventlet.twistedutil import join_reactor
-then start the reactor as you would do in a regular twisted application.
-
-For (2) just make sure that you have reactor installed before using
-any of eventlet functions. Otherwise a non-twisted hub will be selected
-and twisted code won't work.
-
-Most of examples/twisted_* use twisted style with the exception of
-twisted_client.py and twisted_srvconnector.py. All of the non-twisted
-examples in examples directory use eventlet-style (they work with any
-of eventlet's hubs, not just twisted-based).
-
-Eventlet implements "blocking" operations by switching to the main loop
-greenlet, thus it's impossible to call a blocking function when you are
-already in the main loop. Therefore one must be cautious in a twisted
-callback, calling only a non-blocking subset of eventlet API here. The
-following functions won't unschedule the current greenlet and are safe
-to call from anywhere:
-
-1. Greenlet creation functions: api.spawn, proc.spawn,
-   twistedutil.deferToGreenThread and others based on api.spawn.
-
-2. send(), send_exception(), poll(), ready() methods of event.Event
-   and queue.Queue.
-
-3. wait(timeout=0) is identical to poll(). Currently only Proc.wait
-   supports timeout parameter.
-
-4. Proc.link/link_value/link_exception
-
-Other classes that use these names should follow the convention.
-
-For an example on how to take advantage of eventlet in a twisted
-application using deferToGreenThread see examples/twisted_http_proxy.py
-
-Although eventlet provides eventlet.green.socket module that implements
-interface of the standard Python socket, there's also a way to use twisted's
-network code in a synchronous fashion via GreenTransport class.
-A GreenTransport interface is reminiscent of socket but it's not a drop-in
-replacement. It combines features of TCPTransport and Protocol in a single
-object:
-
- * all of transport methods (like getPeer()) are available directly on
-   a GreenTransport instance; in addition, underlying transport object
-   is available via 'transport' attribute;
- * write method is overriden: it may block if transport write buffer is full;
- * read() and recv() methods are provided to retrieve the data from protocol
-   synchronously.
-
-To make a GreenTransport instance use twistedutil.protocol.GreenClientCreator
-(usage is similar to that of twisted.internet.protocol.ClientCreator)
-
-For an example on how to get a connected GreenTransport instance,
-see twisted_client.py, twisted_srvconnect.py or twisted_portforward.py.
-For an example on how to use GreenTransport for incoming connections,
-see twisted_server.py, twisted_portforward.py.
-
-
-also
-* twistedutil.block_on - wait for a deferred to fire
-  block_on(reactor.callInThread(func, args))
-* twistedutil.protocol.basic.LineOnlyReceiverTransport - a green transport
-  variant built on top of LineOnlyReceiver protocol. Demonstrates how
-  to convert a protocol to a synchronous mode.
-
-
-Coroutines
-----------
-
-To understand how eventlet works, one has to understand how to use greenlet:
-http://codespeak.net/py/dist/greenlet.html
-
-Essential points
-
-* There always exists MAIN greenlet
-* Every greenlet except MAIN has a parent. MAIN therefore could be detected as g.parent is None
-* When greenlet is finished it's return value is propagated to the parent (i.e. switch() call
-  in the parent greenlet returns it)
-* When an exception leaves a greelen, it's propagated to the parent (i.e. switch() in the parent
-  re-raises it) unless it's a subclass of GreenletExit, which is returned as a value.
-* parent can be reassigned (by simply setting 'parent' attribute). A cycle would be detected and
-  rejected with ValueError
-
-
-Note, that there's no scheduler of any sort; if a coroutine wants to be
-scheduled again it must take care of it itself. As an application developer,
-however, you don't need to worry about it as that's what eventlet does behind
-the scenes. The cost of that is that you should not use greenlet's switch() and
-throw() methods, they will likely leave the current greenlet unscheduled
-forever. Eventlet also takes advantage of greenlet's `parent' attribute,
-so you should not meddle with it either.
-
-
-How does eventlet work
-----------------------
-
-Twisted's reactor and eventlet's hub are very similar in what they do.
-Both continuously perform polling on the list of registered descriptors
-and each time a specific event is fired, the associated callback function
-is called. In addition, both maintain a list of scheduled calls.
-
-Polling is performed by the main loop - a function that both reactor and hub have.
-When twisted calls user's callback it's expected to return almost immediately,
-without any blocking I/O calls.
-
-Eventlet runs the main loop in a dedicated greenlet (MAIN_LOOP). It is the same
-greenlet as MAIN if you use join_reactor. Otherwise it's a separate greenlet
-started implicitly. The execution is organized in a such way that the switching
-always involves MAIN_LOOP. All of functions in eventlet that appear "blocking"
-use the following algorithm:
-
-1. register a callback that switches back to the current greenlet when
-   an event of interest happens
-2. switch to the MAIN_LOOP
-
-For example, here's what eventlet's socket recv() does:
-
-= blocking operation RECV on socket d =
-
-user's greenlet (USER)             main loop's greenlet (MAIN_LOOP)
-      |
-(inside d.recv() call)
-      |
-add_descriptor(d, RECV)
-      |
-data=MAIN_LOOP.switch() ---------> poll for events
-  ^---------------------\               |
-                        |              ... ---------------------------> may execute other greenlets here
-                        |               |
-                        |          event RECV on descriptor d?
-                        |               |
-                        |          d.remove_descriptor(d, RECV)
-                        |               |
-                        |          data = d.recv() # calling blocking op that will return immediately
-                        |               |
-                        \--------- USER.switch(data) # argument data here becomes return value in user's switch
-  return data
-
index b05be08c115e30d9745384170ecab7d0d4266d7a..608c29b2f7bc3d17b5d1cff672983ad6df466ba2 100644 (file)
@@ -1,21 +1,25 @@
 Eventlet Documentation
 ====================================
 
-Code talks!  This is a simple web crawler that fetches a bunch of urls concurrently::
+Code talks!  This is a simple web crawler that fetches a bunch of urls concurrently:
 
-    urls = ["http://www.google.com/intl/en_ALL/images/logo.gif",
-         "https://wiki.secondlife.com/w/images/secondlife.jpg",
-         "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"]
+.. code-block:: python
+
+    urls = [
+        "http://www.google.com/intl/en_ALL/images/logo.gif",
+        "http://python.org/images/python-logo.gif",
+        "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
+    ]
 
     import eventlet
     from eventlet.green import urllib2
 
     def fetch(url):
-      return urllib2.urlopen(url).read()
+        return urllib2.urlopen(url).read()
 
     pool = eventlet.GreenPool()
     for body in pool.imap(fetch, urls):
-      print("got body", len(body))
+        print("got body", len(body))
 
 Contents
 =========
index b42a25f909af89733f3d3a5cd05045e185a76e2a..9a94fda4d3db8f7660dbb7d5bcdb8078059cae62 100644 (file)
@@ -18,6 +18,11 @@ To create a websocket server, simply decorate a handler method with
     
     wsgi.server(eventlet.listen(('', 8090)), hello_world)
 
+.. note::
+
+    Please see graceful termination warning in :func:`~eventlet.wsgi.server`
+    documentation
+
 
 You can find a slightly more elaborate version of this code in the file
 ``examples/websocket.py``.
index 6fecdbdc76e2f0ac9f26a33f0d35cea40d70a219..4d4d634e2ccdceb06d1ce763dcfc086e0a5467bb 100644 (file)
@@ -113,3 +113,18 @@ as shown in the following example::
 You can find a more elaborate example in the file:
 ``tests/wsgi_test.py``, :func:`test_024a_expect_100_continue_with_headers`.
 
+
+Per HTTP RFC 7231 (http://tools.ietf.org/html/rfc7231#section-6.2) a client is
+required to be able to process one or more 100 continue responses.  A sample
+use case might be a user protocol where the server may want to use a 100-continue
+response to indicate to a client that it is working on a request and the 
+client should not timeout.
+
+To support multiple 100-continue responses, the eventlet wsgi module exports
+the API :func:`send_hundred_continue_response`.
+
+Sample use cases for chunked and non-chunked HTTP scenarios are included
+in the wsgi test case ``tests/wsgi_test.py``,
+:func:`test_024b_expect_100_continue_with_headers_multiple_chunked` and
+:func:`test_024c_expect_100_continue_with_headers_multiple_nonchunked`.
+
index 0d47364463e1f2168e3734693331c2ccb61fe55c..225ce5659f2f1cb7b6045ac257c9d6c37bfb3b0f 100644 (file)
@@ -6,8 +6,8 @@ Eventlet makes it easy to use non-blocking SSL sockets.  If you're using Python
 In either case, the the ``green`` modules handle SSL sockets transparently, just like their standard counterparts.  As an example, :mod:`eventlet.green.urllib2` can be used to fetch https urls in as non-blocking a fashion as you please::
 
     from eventlet.green import urllib2
-    from eventlet import coros
-    bodies = [coros.execute(urllib2.urlopen, url)
+    from eventlet import spawn
+    bodies = [spawn(urllib2.urlopen, url)
          for url in ("https://secondlife.com","https://google.com")]
     for b in bodies:
         print(b.wait().read())
@@ -55,4 +55,4 @@ Here's an example of a server::
     client_conn.close()
     connection.close()
 
-.. _pyOpenSSL: https://launchpad.net/pyopenssl
\ No newline at end of file
+.. _pyOpenSSL: https://launchpad.net/pyopenssl
index 1e7a887e93c2379a6247a08a5f2b875c28e88145..619e0aa983af32026ed5dcc837b53517353e3b72 100644 (file)
@@ -21,7 +21,7 @@ Lastly, you can just use nose directly if you want:
 
 That's it!  The output from running nose is the same as unittest's output, if the entire directory was one big test file.
 
-Many tests are skipped based on environmental factors; for example, it makes no sense to test Twisted-specific functionality when Twisted is not installed.  These are printed as S's during execution, and in the summary printed after the tests run it will tell you how many were skipped.
+Many tests are skipped based on environmental factors; for example, it makes no sense to test kqueue-specific functionality when your OS does not support it.  These are printed as S's during execution, and in the summary printed after the tests run it will tell you how many were skipped.
 
 Doctests
 --------
index dc29367f1701d6ffc62195857873cdd418bb8de8..aff4111ae91213039722b56d7f33ff2eda6ccbb1 100644 (file)
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: eventlet
-Version: 0.15.2
+Version: 0.16.1
 Summary: Highly concurrent networking library
 Home-page: http://eventlet.net
 Author: Linden Lab
@@ -10,8 +10,8 @@ Description: Eventlet is a concurrent networking library for Python that allows
         
         It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
         
-        It's easy to get started using Eventlet, and easy to convert existing 
-        applications to use it.  Start off by looking at the `examples`_, 
+        It's easy to get started using Eventlet, and easy to convert existing
+        applications to use it.  Start off by looking at the `examples`_,
         `common design patterns`_, and the list of `basic API primitives`_.
         
         .. _examples: http://eventlet.net/doc/examples.html
@@ -25,7 +25,7 @@ Description: Eventlet is a concurrent networking library for Python that allows
         Here's something you can try right on the command line::
         
             % python
-            >>> import eventlet 
+            >>> import eventlet
             >>> from eventlet.green import urllib2
             >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
             >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
@@ -57,6 +57,20 @@ Description: Eventlet is a concurrent networking library for Python that allows
         
         The built html files can be found in doc/_build/html afterward.
         
+        
+        Twisted
+        =======
+        
+        Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time,
+        now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration.
+        
+        If you have a project that uses Eventlet with Twisted, your options are:
+        
+        * use last working release eventlet==0.14
+        * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
+        `EVENTLET_HUB` environment variable can point to external modules.
+        * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
+        
 Platform: UNKNOWN
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python
index ecdfbe6e9f84ea14c152adaad407b067bb0a65c1..9124c3d4de17669a94b86e3998ab6735ab786c4a 100644 (file)
@@ -3,7 +3,6 @@ LICENSE
 MANIFEST.in
 NEWS
 README.rst
-README.twisted
 setup.cfg
 setup.py
 doc/Makefile
@@ -39,7 +38,6 @@ doc/modules/websocket.rst
 doc/modules/wsgi.rst
 doc/modules/zmq.rst
 eventlet/__init__.py
-eventlet/api.py
 eventlet/backdoor.py
 eventlet/convenience.py
 eventlet/corolocal.py
@@ -51,15 +49,11 @@ eventlet/greenio.py
 eventlet/greenpool.py
 eventlet/greenthread.py
 eventlet/patcher.py
-eventlet/pool.py
 eventlet/pools.py
-eventlet/proc.py
-eventlet/processes.py
 eventlet/queue.py
 eventlet/semaphore.py
 eventlet/timeout.py
 eventlet/tpool.py
-eventlet/util.py
 eventlet/websocket.py
 eventlet/wsgi.py
 eventlet.egg-info/PKG-INFO
@@ -107,7 +101,6 @@ eventlet/hubs/poll.py
 eventlet/hubs/pyevent.py
 eventlet/hubs/selects.py
 eventlet/hubs/timer.py
-eventlet/hubs/twistedr.py
 eventlet/support/__init__.py
 eventlet/support/greendns.py
 eventlet/support/greenlets.py
@@ -116,11 +109,6 @@ eventlet/support/pylib.py
 eventlet/support/six.py
 eventlet/support/stacklesspypys.py
 eventlet/support/stacklesss.py
-eventlet/twistedutil/__init__.py
-eventlet/twistedutil/join_reactor.py
-eventlet/twistedutil/protocol.py
-eventlet/twistedutil/protocols/__init__.py
-eventlet/twistedutil/protocols/basic.py
 examples/chat_bridge.py
 examples/chat_server.py
 examples/connect.py
@@ -139,12 +127,6 @@ examples/websocket_chat.py
 examples/wsgi.py
 examples/zmq_chat.py
 examples/zmq_simple.py
-examples/twisted/twisted_client.py
-examples/twisted/twisted_http_proxy.py
-examples/twisted/twisted_portforward.py
-examples/twisted/twisted_server.py
-examples/twisted/twisted_srvconnector.py
-examples/twisted/twisted_xcap_proxy.py
 tests/__init__.py
 tests/api_test.py
 tests/backdoor_test.py
@@ -168,21 +150,17 @@ tests/nosewrapper.py
 tests/parse_results.py
 tests/patcher_psycopg_test.py
 tests/patcher_test.py
+tests/patcher_test_importlib_lock.py
 tests/pools_test.py
-tests/processes_test.py
 tests/queue_test.py
 tests/semaphore_test.py
+tests/socket_test.py
 tests/ssl_test.py
 tests/subprocess_test.py
-tests/test__coros_queue.py
 tests/test__event.py
 tests/test__greenness.py
-tests/test__pool.py
-tests/test__proc.py
 tests/test__refcount.py
 tests/test__socket_errors.py
-tests/test__twistedutil.py
-tests/test__twistedutil_protocol.py
 tests/test_server.crt
 tests/test_server.key
 tests/thread_test.py
index e8bd5c1a19388e514a3e53a2958f3f50bb5ca6e9..7b7b3bf4392317d8d28d94b42ed1359f03785f4c 100644 (file)
@@ -1,4 +1,4 @@
-version_info = (0, 15, 2)
+version_info = (0, 16, 1)
 __version__ = '.'.join(map(str, version_info))
 
 try:
diff --git a/eventlet/eventlet/api.py b/eventlet/eventlet/api.py
deleted file mode 100644 (file)
index ca1c1ba..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-import errno
-import sys
-import socket
-import string
-import linecache
-import inspect
-import warnings
-
-from eventlet.support import greenlets as greenlet
-from eventlet import hubs
-from eventlet import greenthread
-from eventlet import debug
-from eventlet import Timeout
-
-__all__ = [
-    'call_after', 'exc_after', 'getcurrent', 'get_default_hub', 'get_hub',
-    'GreenletExit', 'kill', 'sleep', 'spawn', 'spew', 'switch',
-    'ssl_listener', 'tcp_listener', 'trampoline',
-    'unspew', 'use_hub', 'with_timeout', 'timeout']
-
-warnings.warn(
-    "eventlet.api is deprecated!  Nearly everything in it has moved "
-    "to the eventlet module.", DeprecationWarning, stacklevel=2)
-
-
-def get_hub(*a, **kw):
-    warnings.warn(
-        "eventlet.api.get_hub has moved to eventlet.hubs.get_hub",
-        DeprecationWarning, stacklevel=2)
-    return hubs.get_hub(*a, **kw)
-
-
-def get_default_hub(*a, **kw):
-    warnings.warn(
-        "eventlet.api.get_default_hub has moved to"
-        " eventlet.hubs.get_default_hub",
-        DeprecationWarning, stacklevel=2)
-    return hubs.get_default_hub(*a, **kw)
-
-
-def use_hub(*a, **kw):
-    warnings.warn(
-        "eventlet.api.use_hub has moved to eventlet.hubs.use_hub",
-        DeprecationWarning, stacklevel=2)
-    return hubs.use_hub(*a, **kw)
-
-
-def switch(coro, result=None, exc=None):
-    if exc is not None:
-        return coro.throw(exc)
-    return coro.switch(result)
-
-Greenlet = greenlet.greenlet
-
-
-def tcp_listener(address, backlog=50):
-    """
-    Listen on the given ``(ip, port)`` *address* with a TCP socket.  Returns a
-    socket object on which one should call ``accept()`` to accept a connection
-    on the newly bound socket.
-    """
-    warnings.warn(
-        """eventlet.api.tcp_listener is deprecated.  Please use eventlet.listen instead.""",
-        DeprecationWarning, stacklevel=2)
-
-    from eventlet import greenio, util
-    socket = greenio.GreenSocket(util.tcp_socket())
-    util.socket_bind_and_listen(socket, address, backlog=backlog)
-    return socket
-
-
-def ssl_listener(address, certificate, private_key):
-    """Listen on the given (ip, port) *address* with a TCP socket that
-    can do SSL.  Primarily useful for unit tests, don't use in production.
-
-    *certificate* and *private_key* should be the filenames of the appropriate
-    certificate and private key files to use with the SSL socket.
-
-    Returns a socket object on which one should call ``accept()`` to
-    accept a connection on the newly bound socket.
-    """
-    warnings.warn("""eventlet.api.ssl_listener is deprecated.  Please use eventlet.wrap_ssl(eventlet.listen(
-        )) instead.""",
-                  DeprecationWarning, stacklevel=2)
-    from eventlet import util
-    import socket
-
-    socket = util.wrap_ssl(socket.socket(), certificate, private_key, True)
-    socket.bind(address)
-    socket.listen(50)
-    return socket
-
-
-def connect_tcp(address, localaddr=None):
-    """
-    Create a TCP connection to address ``(host, port)`` and return the socket.
-    Optionally, bind to localaddr ``(host, port)`` first.
-    """
-    warnings.warn(
-        """eventlet.api.connect_tcp is deprecated.  Please use eventlet.connect instead.""",
-        DeprecationWarning, stacklevel=2)
-
-    from eventlet import greenio, util
-    desc = greenio.GreenSocket(util.tcp_socket())
-    if localaddr is not None:
-        desc.bind(localaddr)
-    desc.connect(address)
-    return desc
-
-TimeoutError = greenthread.TimeoutError
-
-trampoline = hubs.trampoline
-
-spawn = greenthread.spawn
-spawn_n = greenthread.spawn_n
-
-
-kill = greenthread.kill
-
-call_after = greenthread.call_after
-call_after_local = greenthread.call_after_local
-call_after_global = greenthread.call_after_global
-
-
-class _SilentException(BaseException):
-    pass
-
-
-class FakeTimer(object):
-    def cancel(self):
-        pass
-
-
-class timeout(object):
-    """Raise an exception in the block after timeout.
-
-    Example::
-
-    with timeout(10):
-        urllib2.open('http://example.com')
-
-    Assuming code block is yielding (i.e. gives up control to the hub),
-    an exception provided in *exc* argument will be raised
-    (:class:`~eventlet.api.TimeoutError` if *exc* is omitted)::
-
-    try:
-        with timeout(10, MySpecialError, error_arg_1):
-            urllib2.open('http://example.com')
-    except MySpecialError as e:
-        print("special error received")
-
-    When *exc* is ``None``, code block is interrupted silently.
-    """
-
-    def __init__(self, seconds, *throw_args):
-        self.seconds = seconds
-        if seconds is None:
-            return
-        if not throw_args:
-            self.throw_args = (TimeoutError(), )
-        elif throw_args == (None, ):
-            self.throw_args = (_SilentException(), )
-        else:
-            self.throw_args = throw_args
-
-    def __enter__(self):
-        if self.seconds is None:
-            self.timer = FakeTimer()
-        else:
-            self.timer = exc_after(self.seconds, *self.throw_args)
-        return self.timer
-
-    def __exit__(self, typ, value, tb):
-        self.timer.cancel()
-        if typ is _SilentException and value in self.throw_args:
-            return True
-
-
-with_timeout = greenthread.with_timeout
-
-exc_after = greenthread.exc_after
-
-sleep = greenthread.sleep
-
-getcurrent = greenlet.getcurrent
-GreenletExit = greenlet.GreenletExit
-
-spew = debug.spew
-unspew = debug.unspew
-
-
-def named(name):
-    """Return an object given its name.
-
-    The name uses a module-like syntax, eg::
-
-      os.path.join
-
-    or::
-
-      mulib.mu.Resource
-    """
-    toimport = name
-    obj = None
-    import_err_strings = []
-    while toimport:
-        try:
-            obj = __import__(toimport)
-            break
-        except ImportError as err:
-            # print('Import error on %s: %s' % (toimport, err))  # debugging spam
-            import_err_strings.append(err.__str__())
-            toimport = '.'.join(toimport.split('.')[:-1])
-    if obj is None:
-        raise ImportError('%s could not be imported.  Import errors: %r' % (name, import_err_strings))
-    for seg in name.split('.')[1:]:
-        try:
-            obj = getattr(obj, seg)
-        except AttributeError:
-            dirobj = dir(obj)
-            dirobj.sort()
-            raise AttributeError('attribute %r missing from %r (%r) %r.  Import errors: %r' % (
-                seg, obj, dirobj, name, import_err_strings))
-    return obj
index 5994035918a8574e63b0d3fa15ff9d311d576762..2067772427bdbe62a639e71b096ef6a9f9f89646 100644 (file)
@@ -7,7 +7,7 @@ import sys
 
 import eventlet
 from eventlet import hubs
-from eventlet.support import greenlets, get_errno, six
+from eventlet.support import greenlets, get_errno
 
 try:
     sys.ps1
@@ -30,13 +30,11 @@ class FileProxy(object):
         pass
 
     def write(self, data, *a, **kw):
-        data = six.b(data)
         self.f.write(data, *a, **kw)
         self.f.flush()
 
     def readline(self, *a):
-        line = self.f.readline(*a).replace(b'\r\n', b'\n')
-        return six.u(line)
+        return self.f.readline(*a).replace('\r\n', '\n')
 
     def __getattr__(self, attr):
         return getattr(self.f, attr)
index a6aa915138e86a6f4bf61a9a4eb97e76d09ef03e..d634b2cf831af7c729d19e66fbdb1676aa004948 100644 (file)
@@ -30,7 +30,11 @@ def listen(addr, family=socket.AF_INET, backlog=50):
 
     :param addr: Address to listen on.  For TCP sockets, this is a (host, port)  tuple.
     :param family: Socket family, optional.  See :mod:`socket` documentation for available families.
-    :param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent.
+    :param backlog:
+
+        The maximum number of queued connections. Should be at least 1; the maximum
+        value is system-dependent.
+
     :return: The listening green socket object.
     """
     sock = socket.socket(family, socket.SOCK_STREAM)
@@ -127,7 +131,8 @@ except ImportError:
         from eventlet.green.OpenSSL import SSL
     except ImportError:
         def wrap_ssl_impl(*a, **kw):
-            raise ImportError("To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.6 or later.")
+            raise ImportError(
+                "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.6 or later.")
     else:
         def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
                           cert_reqs=None, ssl_version=None, ca_certs=None,
index 7407ad5645401a0182bc5731e425c97c9c987ce9..431e6f0576759238759f2b2662e06528dd6bfabc 100644 (file)
@@ -1,78 +1,21 @@
 from __future__ import print_function
 
-import collections
-import traceback
-import warnings
-
-import eventlet
 from eventlet import event as _event
-from eventlet import hubs
-from eventlet import greenthread
-from eventlet import semaphore as semaphoremod
-
-
-class NOT_USED:
-    def __repr__(self):
-        return 'NOT_USED'
-
-NOT_USED = NOT_USED()
-
-
-def Event(*a, **kw):
-    warnings.warn("The Event class has been moved to the event module! "
-                  "Please construct event.Event objects instead.",
-                  DeprecationWarning, stacklevel=2)
-    return _event.Event(*a, **kw)
-
-
-def event(*a, **kw):
-    warnings.warn(
-        "The event class has been capitalized and moved!  Please "
-        "construct event.Event objects instead.",
-        DeprecationWarning, stacklevel=2)
-    return _event.Event(*a, **kw)
-
-
-def Semaphore(count):
-    warnings.warn(
-        "The Semaphore class has moved!  Please "
-        "use semaphore.Semaphore instead.",
-        DeprecationWarning, stacklevel=2)
-    return semaphoremod.Semaphore(count)
-
-
-def BoundedSemaphore(count):
-    warnings.warn(
-        "The BoundedSemaphore class has moved!  Please "
-        "use semaphore.BoundedSemaphore instead.",
-        DeprecationWarning, stacklevel=2)
-    return semaphoremod.BoundedSemaphore(count)
-
-
-def semaphore(count=0, limit=None):
-    warnings.warn(
-        "coros.semaphore is deprecated.  Please use either "
-        "semaphore.Semaphore or semaphore.BoundedSemaphore instead.",
-        DeprecationWarning, stacklevel=2)
-    if limit is None:
-        return Semaphore(count)
-    else:
-        return BoundedSemaphore(count)
 
 
 class metaphore(object):
     """This is sort of an inverse semaphore: a counter that starts at 0 and
     waits only if nonzero. It's used to implement a "wait for all" scenario.
 
-    >>> from eventlet import api, coros
+    >>> from eventlet import coros, spawn_n
     >>> count = coros.metaphore()
     >>> count.wait()
     >>> def decrementer(count, id):
     ...     print("{0} decrementing".format(id))
     ...     count.dec()
     ...
-    >>> _ = eventlet.spawn(decrementer, count, 'A')
-    >>> _ = eventlet.spawn(decrementer, count, 'B')
+    >>> _ = spawn_n(decrementer, count, 'A')
+    >>> _ = spawn_n(decrementer, count, 'B')
     >>> count.inc(2)
     >>> count.wait()
     A decrementing
@@ -116,212 +59,3 @@ class metaphore(object):
         resume the caller once the count decrements to zero again.
         """
         self.event.wait()
-
-
-def execute(func, *args, **kw):
-    """ Executes an operation asynchronously in a new coroutine, returning
-    an event to retrieve the return value.
-
-    This has the same api as the :meth:`eventlet.coros.CoroutinePool.execute`
-    method; the only difference is that this one creates a new coroutine
-    instead of drawing from a pool.
-
-    >>> from eventlet import coros
-    >>> evt = coros.execute(lambda a: ('foo', a), 1)
-    >>> evt.wait()
-    ('foo', 1)
-    """
-    warnings.warn(
-        "Coros.execute is deprecated.  Please use eventlet.spawn "
-        "instead.", DeprecationWarning, stacklevel=2)
-    return greenthread.spawn(func, *args, **kw)
-
-
-def CoroutinePool(*args, **kwargs):
-    warnings.warn(
-        "CoroutinePool is deprecated.  Please use "
-        "eventlet.GreenPool instead.", DeprecationWarning, stacklevel=2)
-    from eventlet.pool import Pool
-    return Pool(*args, **kwargs)
-
-
-class Queue(object):
-
-    def __init__(self):
-        warnings.warn(
-            "coros.Queue is deprecated.  Please use "
-            "eventlet.queue.Queue instead.",
-            DeprecationWarning, stacklevel=2)
-        self.items = collections.deque()
-        self._waiters = set()
-
-    def __nonzero__(self):
-        return len(self.items) > 0
-
-    __bool__ = __nonzero__
-
-    def __len__(self):
-        return len(self.items)
-
-    def __repr__(self):
-        params = (self.__class__.__name__, hex(id(self)),
-                  len(self.items), len(self._waiters))
-        return '<%s at %s items[%d] _waiters[%s]>' % params
-
-    def send(self, result=None, exc=None):
-        if exc is not None and not isinstance(exc, tuple):
-            exc = (exc, )
-        self.items.append((result, exc))
-        if self._waiters:
-            hubs.get_hub().schedule_call_global(0, self._do_send)
-
-    def send_exception(self, *args):
-        # the arguments are the same as for greenlet.throw
-        return self.send(exc=args)
-
-    def _do_send(self):
-        if self._waiters and self.items:
-            waiter = self._waiters.pop()
-            result, exc = self.items.popleft()
-            waiter.switch((result, exc))
-
-    def wait(self):
-        if self.items:
-            result, exc = self.items.popleft()
-            if exc is None:
-                return result
-            else:
-                eventlet.getcurrent().throw(*exc)
-        else:
-            self._waiters.add(eventlet.getcurrent())
-            try:
-                result, exc = hubs.get_hub().switch()
-                if exc is None:
-                    return result
-                else:
-                    eventlet.getcurrent().throw(*exc)
-            finally:
-                self._waiters.discard(eventlet.getcurrent())
-
-    def ready(self):
-        return len(self.items) > 0
-
-    def full(self):
-        # for consistency with Channel
-        return False
-
-    def waiting(self):
-        return len(self._waiters)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        return self.wait()
-
-
-class Channel(object):
-
-    def __init__(self, max_size=0):
-        warnings.warn(
-            "coros.Channel is deprecated.  Please use "
-            "eventlet.queue.Queue(0) instead.",
-            DeprecationWarning, stacklevel=2)
-        self.max_size = max_size
-        self.items = collections.deque()
-        self._waiters = set()
-        self._senders = set()
-
-    def __nonzero__(self):
-        return len(self.items) > 0
-
-    __bool__ = __nonzero__
-
-    def __len__(self):
-        return len(self.items)
-
-    def __repr__(self):
-        params = (self.__class__.__name__, hex(id(self)),
-                  self.max_size, len(self.items),
-                  len(self._waiters), len(self._senders))
-        return '<%s at %s max=%s items[%d] _w[%s] _s[%s]>' % params
-
-    def send(self, result=None, exc=None):
-        if exc is not None and not isinstance(exc, tuple):
-            exc = (exc, )
-        if eventlet.getcurrent() is hubs.get_hub().greenlet:
-            self.items.append((result, exc))
-            if self._waiters:
-                hubs.get_hub().schedule_call_global(0, self._do_switch)
-        else:
-            self.items.append((result, exc))
-            # note that send() does not work well with timeouts. if your timeout fires
-            # after this point, the item will remain in the queue
-            if self._waiters:
-                hubs.get_hub().schedule_call_global(0, self._do_switch)
-            if len(self.items) > self.max_size:
-                self._senders.add(eventlet.getcurrent())
-                try:
-                    hubs.get_hub().switch()
-                finally:
-                    self._senders.discard(eventlet.getcurrent())
-
-    def send_exception(self, *args):
-        # the arguments are the same as for greenlet.throw
-        return self.send(exc=args)
-
-    def _do_switch(self):
-        while True:
-            if self._waiters and self.items:
-                waiter = self._waiters.pop()
-                result, exc = self.items.popleft()
-                try:
-                    waiter.switch((result, exc))
-                except:
-                    traceback.print_exc()
-            elif self._senders and len(self.items) <= self.max_size:
-                sender = self._senders.pop()
-                try:
-                    sender.switch()
-                except:
-                    traceback.print_exc()
-            else:
-                break
-
-    def wait(self):
-        if self.items:
-            result, exc = self.items.popleft()
-            if len(self.items) <= self.max_size:
-                hubs.get_hub().schedule_call_global(0, self._do_switch)
-            if exc is None:
-                return result
-            else:
-                eventlet.getcurrent().throw(*exc)
-        else:
-            if self._senders:
-                hubs.get_hub().schedule_call_global(0, self._do_switch)
-            self._waiters.add(eventlet.getcurrent())
-            try:
-                result, exc = hubs.get_hub().switch()
-                if exc is None:
-                    return result
-                else:
-                    eventlet.getcurrent().throw(*exc)
-            finally:
-                self._waiters.discard(eventlet.getcurrent())
-
-    def ready(self):
-        return len(self.items) > 0
-
-    def full(self):
-        return len(self.items) >= self.max_size
-
-    def waiting(self):
-        return max(0, len(self._waiters) - len(self.items))
-
-
-def queue(max_size=None):
-    if max_size is None:
-        return Queue()
-    else:
-        return Channel(max_size)
index 973de808cc654f92545c852302c9c9364456c908..31cd7cf09abbbdae334632c9194803ec82882c86 100644 (file)
@@ -259,6 +259,7 @@ class BaseConnectionPool(Pool):
             # Free items created using min_size>0 are not tuples.
             conn = item[2] if isinstance(item, tuple) else item
             self._safe_close(conn, quiet=True)
+            self.current_size -= 1
 
     def __del__(self):
         self.clear()
@@ -317,73 +318,107 @@ class GenericConnectionWrapper(object):
     #     setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
     # * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
     # * other?
-    def __enter__(self): return self._base.__enter__()
+    def __enter__(self):
+        return self._base.__enter__()
 
-    def __exit__(self, exc, value, tb): return self._base.__exit__(exc, value, tb)
+    def __exit__(self, exc, value, tb):
+        return self._base.__exit__(exc, value, tb)
 
-    def __repr__(self): return self._base.__repr__()
+    def __repr__(self):
+        return self._base.__repr__()
 
-    def affected_rows(self): return self._base.affected_rows()
+    def affected_rows(self):
+        return self._base.affected_rows()
 
-    def autocommit(self, *args, **kwargs): return self._base.autocommit(*args, **kwargs)
+    def autocommit(self, *args, **kwargs):
+        return self._base.autocommit(*args, **kwargs)
 
-    def begin(self): return self._base.begin()
+    def begin(self):
+        return self._base.begin()
 
-    def change_user(self, *args, **kwargs): return self._base.change_user(*args, **kwargs)
+    def change_user(self, *args, **kwargs):
+        return self._base.change_user(*args, **kwargs)
 
-    def character_set_name(self, *args, **kwargs): return self._base.character_set_name(*args, **kwargs)
+    def character_set_name(self, *args, **kwargs):
+        return self._base.character_set_name(*args, **kwargs)
 
-    def close(self, *args, **kwargs): return self._base.close(*args, **kwargs)
+    def close(self, *args, **kwargs):
+        return self._base.close(*args, **kwargs)
 
-    def commit(self, *args, **kwargs): return self._base.commit(*args, **kwargs)
+    def commit(self, *args, **kwargs):
+        return self._base.commit(*args, **kwargs)
 
-    def cursor(self, *args, **kwargs): return self._base.cursor(*args, **kwargs)
+    def cursor(self, *args, **kwargs):
+        return self._base.cursor(*args, **kwargs)
 
-    def dump_debug_info(self, *args, **kwargs): return self._base.dump_debug_info(*args, **kwargs)
+    def dump_debug_info(self, *args, **kwargs):
+        return self._base.dump_debug_info(*args, **kwargs)
 
-    def errno(self, *args, **kwargs): return self._base.errno(*args, **kwargs)
+    def errno(self, *args, **kwargs):
+        return self._base.errno(*args, **kwargs)
 
-    def error(self, *args, **kwargs): return self._base.error(*args, **kwargs)
+    def error(self, *args, **kwargs):
+        return self._base.error(*args, **kwargs)
 
-    def errorhandler(self, *args, **kwargs): return self._base.errorhandler(*args, **kwargs)
+    def errorhandler(self, *args, **kwargs):
+        return self._base.errorhandler(*args, **kwargs)
 
-    def insert_id(self, *args, **kwargs): return self._base.insert_id(*args, **kwargs)
+    def insert_id(self, *args, **kwargs):
+        return self._base.insert_id(*args, **kwargs)
 
-    def literal(self, *args, **kwargs): return self._base.literal(*args, **kwargs)
+    def literal(self, *args, **kwargs):
+        return self._base.literal(*args, **kwargs)
 
-    def set_character_set(self, *args, **kwargs): return self._base.set_character_set(*args, **kwargs)
+    def set_character_set(self, *args, **kwargs):
+        return self._base.set_character_set(*args, **kwargs)
 
-    def set_sql_mode(self, *args, **kwargs): return self._base.set_sql_mode(*args, **kwargs)
+    def set_sql_mode(self, *args, **kwargs):
+        return self._base.set_sql_mode(*args, **kwargs)
 
-    def show_warnings(self): return self._base.show_warnings()
+    def show_warnings(self):
+        return self._base.show_warnings()
 
-    def warning_count(self): return self._base.warning_count()
+    def warning_count(self):
+        return self._base.warning_count()
 
-    def ping(self, *args, **kwargs): return self._base.ping(*args, **kwargs)
+    def ping(self, *args, **kwargs):
+        return self._base.ping(*args, **kwargs)
 
-    def query(self, *args, **kwargs): return self._base.query(*args, **kwargs)
+    def query(self, *args, **kwargs):
+        return self._base.query(*args, **kwargs)
 
-    def rollback(self, *args, **kwargs): return self._base.rollback(*args, **kwargs)
+    def rollback(self, *args, **kwargs):
+        return self._base.rollback(*args, **kwargs)
 
-    def select_db(self, *args, **kwargs): return self._base.select_db(*args, **kwargs)
+    def select_db(self, *args, **kwargs):
+        return self._base.select_db(*args, **kwargs)
 
-    def set_server_option(self, *args, **kwargs): return self._base.set_server_option(*args, **kwargs)
+    def set_server_option(self, *args, **kwargs):
+        return self._base.set_server_option(*args, **kwargs)
 
-    def server_capabilities(self, *args, **kwargs): return self._base.server_capabilities(*args, **kwargs)
+    def server_capabilities(self, *args, **kwargs):
+        return self._base.server_capabilities(*args, **kwargs)
 
-    def shutdown(self, *args, **kwargs): return self._base.shutdown(*args, **kwargs)
+    def shutdown(self, *args, **kwargs):
+        return self._base.shutdown(*args, **kwargs)
 
-    def sqlstate(self, *args, **kwargs): return self._base.sqlstate(*args, **kwargs)
+    def sqlstate(self, *args, **kwargs):
+        return self._base.sqlstate(*args, **kwargs)
 
-    def stat(self, *args, **kwargs): return self._base.stat(*args, **kwargs)
+    def stat(self, *args, **kwargs):
+        return self._base.stat(*args, **kwargs)
 
-    def store_result(self, *args, **kwargs): return self._base.store_result(*args, **kwargs)
+    def store_result(self, *args, **kwargs):
+        return self._base.store_result(*args, **kwargs)
 
-    def string_literal(self, *args, **kwargs): return self._base.string_literal(*args, **kwargs)
+    def string_literal(self, *args, **kwargs):
+        return self._base.string_literal(*args, **kwargs)
 
-    def thread_id(self, *args, **kwargs): return self._base.thread_id(*args, **kwargs)
+    def thread_id(self, *args, **kwargs):
+        return self._base.thread_id(*args, **kwargs)
 
-    def use_result(self, *args, **kwargs): return self._base.use_result(*args, **kwargs)
+    def use_result(self, *args, **kwargs):
+        return self._base.use_result(*args, **kwargs)
 
 
 class PooledConnectionWrapper(GenericConnectionWrapper):
@@ -420,7 +455,7 @@ class PooledConnectionWrapper(GenericConnectionWrapper):
 
     def __del__(self):
         return  # this causes some issues if __del__ is called in the
-                # main coroutine, so for now this is disabled
+        # main coroutine, so for now this is disabled
         # self.close()
 
 
index df837372ec65cc99ed4a7e429885bfff8c7c0175..373c1407d79a3968df82eb3faca325e45ab6669f 100644 (file)
@@ -9,7 +9,6 @@ slurp_properties(__socket, globals(),
 
 os = __import__('os')
 import sys
-import warnings
 from eventlet.hubs import get_hub
 from eventlet.greenio import GreenSocket as socket
 from eventlet.greenio import SSL as _SSL  # for exceptions
@@ -86,18 +85,3 @@ class GreenSSLObject(object):
         for debugging purposes; do not parse the content of this string because its
         format can't be parsed unambiguously."""
         return str(self.connection.get_peer_certificate().get_issuer())
-
-
-try:
-    from eventlet.green import ssl as ssl_module
-    sslerror = __socket.sslerror
-    __socket.ssl
-except AttributeError:
-    # if the real socket module doesn't have the ssl method or sslerror
-    # exception, we can't emulate them
-    pass
-else:
-    def ssl(sock, certificate=None, private_key=None):
-        warnings.warn("socket.ssl() is deprecated.  Use ssl.wrap_socket() instead.",
-                      DeprecationWarning, stacklevel=2)
-        return ssl_module.sslwrap_simple(sock, private_key, certificate)
index 2ea2e617e9d49d4ec2df7c12d993ef9a2177e907..3dd2c763e90bf1abb2c97af0179f9f1a441cd1fc 100644 (file)
@@ -18,11 +18,13 @@ __all__ = dir(builtins_orig)
 __patched__ = ['file', 'open']
 
 slurp_properties(builtins_orig, globals(),
-    ignore=__patched__, srckeys=dir(builtins_orig))
+                 ignore=__patched__, srckeys=dir(builtins_orig))
 
 hubs.get_hub()
 
 __original_file = file
+
+
 class file(__original_file):
     def __init__(self, *args, **kwargs):
         super(file, self).__init__(*args, **kwargs)
@@ -30,6 +32,8 @@ class file(__original_file):
 
 __original_open = open
 __opening = False
+
+
 def open(*args):
     global __opening
     result = __original_open(*args)
@@ -40,4 +44,4 @@ def open(*args):
         __opening = True
         hubs.notify_opened(result.fileno())
         __opening = False
-    return result
\ No newline at end of file
+    return result
index 26be3e03334c79438bc80d813de7a8f4aac81b9f..959d15f455879e82b915f63512e3a8eb0aa32e40 100644 (file)
@@ -96,11 +96,16 @@ def waitpid(pid, options):
             greenthread.sleep(0.01)
 
 __original_open__ = os_orig.open
-def open(file, flags, mode=0o777):
+
+
+def open(file, flags, mode=0o777, dir_fd=None):
     """ Wrap os.open
         This behaves identically, but collaborates with
         the hub's notify_opened protocol.
     """
-    fd = __original_open__(file, flags, mode)
+    if dir_fd is not None:
+        fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
+    else:
+        fd = __original_open__(file, flags, mode)
     hubs.notify_opened(fd)
     return fd
index 544e6dc4b18b5a577a22d85c4ba938bd73c7e231..e323adc23a671e0340dc5ef963a7263b7b5242e9 100644 (file)
@@ -23,7 +23,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""This module is API-equivalent to the standard library :mod:`profile` module but it is greenthread-aware as well as thread-aware.  Use this module
+"""This module is API-equivalent to the standard library :mod:`profile` module
+but it is greenthread-aware as well as thread-aware.  Use this module
 to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
 FIXME: No testcases for this module.
 """
@@ -34,9 +35,7 @@ __all__ = profile_orig.__all__
 from eventlet.patcher import slurp_properties
 slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
 
-import new
 import sys
-import traceback
 import functools
 
 from eventlet import greenthread
@@ -58,7 +57,7 @@ class Profile(profile_orig.Profile):
 
     def __call__(self, *args):
         """make callable, allowing an instance to be the profiler"""
-        r = self.dispatcher(*args)
+        self.dispatcher(*args)
 
     def _setup(self):
         self._has_setup = True
index f1bf3818baaf7e2c650d1b5050f5c298e4eb91ab..53fb3598efe197da851ff185329be310342c089a 100644 (file)
@@ -17,12 +17,12 @@ def get_fileno(obj):
         f = obj.fileno
     except AttributeError:
         if not isinstance(obj, six.integer_types):
-            raise TypeError("Expected int or long, got " + type(obj))
+            raise TypeError("Expected int or long, got %s" % type(obj))
         return obj
     else:
         rv = f()
         if not isinstance(rv, six.integer_types):
-            raise TypeError("Expected int or long, got " + type(rv))
+            raise TypeError("Expected int or long, got %s" % type(rv))
         return rv
 
 
index 42ad6386cb73961b866a1c57483bde5c4bfbdf29..2ec9d1b244fe57c5aeb1fe3b5dedd2d18e0f5103 100644 (file)
@@ -53,7 +53,8 @@ def create_connection(address,
             sock.connect(sa)
             return sock
 
-        except error as msg:
+        except error as e:
+            msg = e
             if sock is not None:
                 sock.close()
 
index ba5e6c20567c497d35e2c32d1d086599d274d9aa..963fbdb265af85483f7ff2bdcebc9a4bd98da866 100644 (file)
@@ -7,9 +7,11 @@ import sys
 import errno
 time = __import__('time')
 
-from eventlet.support import get_errno
+from eventlet.support import get_errno, PY33, six
 from eventlet.hubs import trampoline, IOClosed
-from eventlet.greenio import set_nonblocking, GreenSocket, SOCKET_CLOSED, CONNECT_ERR, CONNECT_SUCCESS
+from eventlet.greenio import (
+    set_nonblocking, GreenSocket, SOCKET_CLOSED, CONNECT_ERR, CONNECT_SUCCESS,
+)
 orig_socket = __import__('socket')
 socket = orig_socket.socket
 if sys.version_info >= (2, 7):
@@ -21,9 +23,10 @@ else:
 
 __patched__ = ['SSLSocket', 'wrap_socket', 'sslwrap_simple']
 
+_original_sslsocket = __ssl.SSLSocket
 
-class GreenSSLSocket(__ssl.SSLSocket):
 
+class GreenSSLSocket(_original_sslsocket):
     """ This is a green version of the SSLSocket class from the ssl module added
     in 2.6.  For documentation on it, please see the Python standard
     documentation.
@@ -40,23 +43,49 @@ class GreenSSLSocket(__ssl.SSLSocket):
     # we are inheriting from SSLSocket because its constructor calls
     # do_handshake whose behavior we wish to override
 
-    def __init__(self, sock, *args, **kw):
+    def __init__(self, sock, keyfile=None, certfile=None,
+                 server_side=False, cert_reqs=CERT_NONE,
+                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
+                 do_handshake_on_connect=True, *args, **kw):
         if not isinstance(sock, GreenSocket):
             sock = GreenSocket(sock)
 
         self.act_non_blocking = sock.act_non_blocking
-        self._timeout = sock.gettimeout()
-        super(GreenSSLSocket, self).__init__(sock.fd, *args, **kw)
+
+        if six.PY2:
+            # On Python 2 SSLSocket constructor queries the timeout, it'd break without
+            # this assignment
+            self._timeout = sock.gettimeout()
+
+        # nonblocking socket handshaking on connect got disabled so let's pretend it's disabled
+        # even when it's on
+        super(GreenSSLSocket, self).__init__(
+            sock.fd, keyfile, certfile, server_side, cert_reqs, ssl_version,
+            ca_certs, do_handshake_on_connect and six.PY2, *args, **kw)
 
         # the superclass initializer trashes the methods so we remove
         # the local-object versions of them and let the actual class
         # methods shine through
+        # Note: This for Python 2
         try:
             for fn in orig_socket._delegate_methods:
                 delattr(self, fn)
         except AttributeError:
             pass
 
+        if six.PY3:
+            # Python 3 SSLSocket construction process overwrites the timeout so restore it
+            self._timeout = sock.gettimeout()
+
+            # it also sets timeout to None internally apparently (tested with 3.4.2)
+            _original_sslsocket.settimeout(self, 0.0)
+            assert _original_sslsocket.gettimeout(self) == 0.0
+
+            # see note above about handshaking
+            self.do_handshake_on_connect = do_handshake_on_connect
+            if do_handshake_on_connect and self._connected:
+                self.do_handshake()
+
     def settimeout(self, timeout):
         self._timeout = timeout
 
@@ -98,14 +127,14 @@ class GreenSSLSocket(__ssl.SSLSocket):
         return self._call_trampolining(
             super(GreenSSLSocket, self).write, data)
 
-    def read(self, len=1024):
+    def read(self, *args, **kwargs):
         """Read up to LEN bytes and return them.
         Return zero-length string on EOF."""
         try:
             return self._call_trampolining(
-                super(GreenSSLSocket, self).read, len)
+                super(GreenSSLSocket, self).read, *args, **kwargs)
         except IOClosed:
-            return ''
+            return b''
 
     def send(self, data, flags=0):
         if self._sslobj:
@@ -171,27 +200,31 @@ class GreenSSLSocket(__ssl.SSLSocket):
                         raise
                     if get_errno(e) == errno.EWOULDBLOCK:
                         try:
-                            trampoline(self, read=True,
-                                       timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+                            trampoline(
+                                self, read=True,
+                                timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
                         except IOClosed:
-                            return ''
+                            return b''
                     if get_errno(e) in SOCKET_CLOSED:
-                        return ''
+                        return b''
                     raise
 
     def recv_into(self, buffer, nbytes=None, flags=0):
         if not self.act_non_blocking:
-            trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+            trampoline(self, read=True, timeout=self.gettimeout(),
+                       timeout_exc=timeout_exc('timed out'))
         return super(GreenSSLSocket, self).recv_into(buffer, nbytes, flags)
 
     def recvfrom(self, addr, buflen=1024, flags=0):
         if not self.act_non_blocking:
-            trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+            trampoline(self, read=True, timeout=self.gettimeout(),
+                       timeout_exc=timeout_exc('timed out'))
         return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags)
 
     def recvfrom_into(self, buffer, nbytes=None, flags=0):
         if not self.act_non_blocking:
-            trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+            trampoline(self, read=True, timeout=self.gettimeout(),
+                       timeout_exc=timeout_exc('timed out'))
         return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags)
 
     def unwrap(self):
@@ -228,8 +261,9 @@ class GreenSSLSocket(__ssl.SSLSocket):
                         real_connect(self, addr)
                     except orig_socket.error as exc:
                         if get_errno(exc) in CONNECT_ERR:
-                            trampoline(self, write=True,
-                                       timeout=end - time.time(), timeout_exc=timeout_exc('timed out'))
+                            trampoline(
+                                self, write=True,
+                                timeout=end - time.time(), timeout_exc=timeout_exc('timed out'))
                         elif get_errno(exc) in CONNECT_SUCCESS:
                             return
                         else:
@@ -245,14 +279,22 @@ class GreenSSLSocket(__ssl.SSLSocket):
         if self._sslobj:
             raise ValueError("attempt to connect already-connected SSLSocket!")
         self._socket_connect(addr)
-        if has_ciphers:
-            self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
-                                        self.cert_reqs, self.ssl_version,
-                                        self.ca_certs, self.ciphers)
+        server_side = False
+        try:
+            sslwrap = _ssl.sslwrap
+        except AttributeError:
+            # sslwrap was removed in 3.x and later in 2.7.9
+            if six.PY2:
+                sslobj = self._context._wrap_socket(self._sock, server_side, ssl_sock=self)
+            else:
+                context = self.context if PY33 else self._context
+                sslobj = context._wrap_socket(self, server_side)
         else:
-            self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
-                                        self.cert_reqs, self.ssl_version,
-                                        self.ca_certs)
+            sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
+                             self.cert_reqs, self.ssl_version,
+                             self.ca_certs, *([self.ciphers] if has_ciphers else []))
+
+        self._sslobj = sslobj
         if self.do_handshake_on_connect:
             self.do_handshake()
 
index e47e8d9969b63a02cca6a478b4743f3c30bae317..1d7c49a3b2ebcb6d24df2c66d0fab69e909a650f 100644 (file)
@@ -21,8 +21,9 @@ if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
         a child process.
         """
 
-        def __init__(self, cmd, output=None):
+        def __init__(self, cmd, timeout, output=None):
             self.cmd = cmd
+            self.timeout = timeout
             self.output = output
 
         def __str__(self):
@@ -64,7 +65,7 @@ class Popen(subprocess_orig.Popen):
                 if status is not None:
                     return status
                 if timeout is not None and time.time() > endtime:
-                    raise TimeoutExpired(self.args)
+                    raise TimeoutExpired(self.args, timeout)
                 eventlet.sleep(check_interval)
         except OSError as e:
             if e.errno == errno.ECHILD:
index b0494654d5479f365936a92815b7dfb1711b41df..236031089521045f7c298c2b79ecba115f4639a7 100644 (file)
@@ -1,6 +1,6 @@
 """Implements the standard thread module, using greenthreads."""
 from eventlet.support.six.moves import _thread as __thread
-from eventlet.support import greenlets as greenlet
+from eventlet.support import greenlets as greenlet, six
 from eventlet import greenthread
 from eventlet.semaphore import Semaphore as LockType
 
@@ -13,6 +13,15 @@ error = __thread.error
 __threadcount = 0
 
 
+if six.PY3:
+    def _set_sentinel():
+        # TODO this is a dummy code, reimplementing this may be needed:
+        # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
+        return allocate_lock()
+
+    TIMEOUT_MAX = __thread.TIMEOUT_MAX
+
+
 def _count():
     return __threadcount
 
@@ -74,6 +83,7 @@ if hasattr(__thread, 'stack_size'):
             return __original_stack_size__(size)
         else:
             pass
-            # not going to decrease stack_size, because otherwise other greenlets in this thread will suffer
+            # not going to decrease stack_size, because otherwise other greenlets in
+            # this thread will suffer
 
 from eventlet.corolocal import local as _local
index 5c56ba126152df9f859bb085b2e239ec1ddbdfe8..31762610033e5355b14c8c766ef4a6832090f4f8 100644 (file)
@@ -2,12 +2,17 @@
 from eventlet import patcher
 from eventlet.green import thread
 from eventlet.green import time
-from eventlet.support import greenlets as greenlet
+from eventlet.support import greenlets as greenlet, six
 
-__patched__ = ['_start_new_thread', '_allocate_lock', '_get_ident', '_sleep',
-               'local', 'stack_size', 'Lock', 'currentThread',
+__patched__ = ['_start_new_thread', '_allocate_lock',
+               '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
                'current_thread', '_after_fork', '_shutdown']
 
+if six.PY2:
+    __patched__ += ['_get_ident']
+else:
+    __patched__ += ['get_ident', '_set_sentinel']
+
 __orig_threading = patcher.original('threading')
 __threadlocal = __orig_threading.local()
 
@@ -15,7 +20,7 @@ __threadlocal = __orig_threading.local()
 patcher.inject(
     'threading',
     globals(),
-    ('thread', thread),
+    ('thread' if six.PY2 else '_thread', thread),
     ('time', time))
 
 del patcher
index 07c2fbe2b58045e1f92edb0712016afeecb4d071..b08eabdc8431117a90fa9294950582f1a317dcd5 100644 (file)
@@ -1,4 +1,6 @@
-"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
+# -*- coding: utf-8 -*-
+"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
+found in :mod:`pyzmq <zmq>` to be non blocking
 """
 
 from __future__ import with_statement
index 38e8168360e698571bc0df2fee25cee5658f5595..f44096e91a56b9c93447a30a1db00713a8d4d141 100644 (file)
@@ -119,6 +119,9 @@ class GreenSocket(object):
     to save syscalls.
     """
 
+    # This placeholder is to prevent __getattr__ from creating an infinite call loop
+    fd = None
+
     def __init__(self, family_or_realsock=socket.AF_INET, *args, **kwargs):
         should_set_nonblocking = kwargs.pop('set_nonblocking', True)
         if isinstance(family_or_realsock, six.integer_types):
@@ -160,12 +163,23 @@ class GreenSocket(object):
     def _sock(self):
         return self
 
+    if six.PY3:
+        def _get_io_refs(self):
+            return self.fd._io_refs
+
+        def _set_io_refs(self, value):
+            self.fd._io_refs = value
+
+        _io_refs = property(_get_io_refs, _set_io_refs)
+
     # Forward unknown attributes to fd, cache the value for future use.
     # I do not see any simple attribute which could be changed
     # so caching everything in self is fine.
     # If we find such attributes - only attributes having __get__ might be cached.
     # For now - I do not want to complicate it.
     def __getattr__(self, name):
+        if self.fd is None:
+            raise AttributeError(name)
         attr = getattr(self.fd, name)
         setattr(self, name, attr)
         return attr
@@ -182,8 +196,8 @@ class GreenSocket(object):
             raise IOClosed()
         try:
             return trampoline(fd, read=read, write=write, timeout=timeout,
-                            timeout_exc=timeout_exc,
-                            mark_as_closed=self._mark_as_closed)
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
         except IOClosed:
             # This socket's been obsoleted. De-fang it.
             self._mark_as_closed()
@@ -200,19 +214,17 @@ class GreenSocket(object):
                 set_nonblocking(client)
                 return type(self)(client), addr
             self._trampoline(fd, read=True, timeout=self.gettimeout(),
-                       timeout_exc=socket.timeout("timed out"))
+                             timeout_exc=socket.timeout("timed out"))
 
     def _mark_as_closed(self):
         """ Mark this socket as being closed """
         self._closed = True
 
-    def close(self):
-        notify_close(self.fd)
-        self._mark_as_closed()
-        return self.fd.close()
-
     def __del__(self):
-        self.close()
+        # This is in case self.close is not assigned yet (currently the constructor does it)
+        close = getattr(self, 'close', None)
+        if close is not None:
+            close()
 
     def connect(self, address):
         if self.act_non_blocking:
@@ -234,7 +246,7 @@ class GreenSocket(object):
                     raise socket.timeout("timed out")
                 try:
                     self._trampoline(fd, write=True, timeout=end - time.time(),
-                           timeout_exc=socket.timeout("timed out"))
+                                     timeout_exc=socket.timeout("timed out"))
                 except IOClosed:
                     # ... we need some workable errno here.
                     raise socket.error(errno.EBADFD)
@@ -262,7 +274,7 @@ class GreenSocket(object):
                     if time.time() >= end:
                         raise socket.timeout(errno.EAGAIN)
                     self._trampoline(fd, write=True, timeout=end - time.time(),
-                               timeout_exc=socket.timeout(errno.EAGAIN))
+                                     timeout_exc=socket.timeout(errno.EAGAIN))
                     socket_checkerr(fd)
                 except socket.error as ex:
                     return get_errno(ex)
@@ -275,12 +287,16 @@ class GreenSocket(object):
         newsock.settimeout(self.gettimeout())
         return newsock
 
-    def makefile(self, *args, **kw):
-        dupped = self.dup()
-        res = _fileobject(dupped, *args, **kw)
-        if hasattr(dupped, "_drop"):
-            dupped._drop()
-        return res
+    if six.PY3:
+        def makefile(self, *args, **kwargs):
+            return _original_socket.makefile(self, *args, **kwargs)
+    else:
+        def makefile(self, *args, **kwargs):
+            dupped = self.dup()
+            res = _fileobject(dupped, *args, **kwargs)
+            if hasattr(dupped, "_drop"):
+                dupped._drop()
+            return res
 
     def makeGreenFile(self, *args, **kw):
         warnings.warn("makeGreenFile has been deprecated, please use "
@@ -314,19 +330,19 @@ class GreenSocket(object):
     def recvfrom(self, *args):
         if not self.act_non_blocking:
             self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
-                       timeout_exc=socket.timeout("timed out"))
+                             timeout_exc=socket.timeout("timed out"))
         return self.fd.recvfrom(*args)
 
     def recvfrom_into(self, *args):
         if not self.act_non_blocking:
             self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
-                       timeout_exc=socket.timeout("timed out"))
+                             timeout_exc=socket.timeout("timed out"))
         return self.fd.recvfrom_into(*args)
 
     def recv_into(self, *args):
         if not self.act_non_blocking:
             self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
-                       timeout_exc=socket.timeout("timed out"))
+                             timeout_exc=socket.timeout("timed out"))
         return self.fd.recv_into(*args)
 
     def send(self, data, flags=0):
@@ -349,7 +365,7 @@ class GreenSocket(object):
 
             try:
                 self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
-                           timeout_exc=socket.timeout("timed out"))
+                                 timeout_exc=socket.timeout("timed out"))
             except IOClosed:
                 raise socket.error(errno.ECONNRESET, 'Connection closed by another thread')
 
@@ -419,8 +435,8 @@ class _SocketDuckForFd(object):
             raise IOClosed()
         try:
             return trampoline(fd, read=read, write=write, timeout=timeout,
-                            timeout_exc=timeout_exc,
-                            mark_as_closed=self._mark_as_closed)
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
         except IOClosed:
             # Our fileno has been obsoleted. Defang ourselves to
             # prevent spurious closes.
@@ -457,11 +473,12 @@ class _SocketDuckForFd(object):
     def send(self, data):
         while True:
             try:
-                os.write(self._fileno, data)
+                return os.write(self._fileno, data)
             except OSError as e:
                 if get_errno(e) not in SOCKET_BLOCKING:
                     raise IOError(*e.args)
-            trampoline(self, write=True)
+                else:
+                    trampoline(self, write=True)
 
     def sendall(self, data):
         len_data = len(data)
index 3660308ff0119c290c6cba46b04f25f50b48d52b..921d7a921bd05726f6110a019bd070e131280f9e 100644 (file)
@@ -8,7 +8,9 @@ from eventlet.hubs import timer
 from eventlet.support import greenlets as greenlet, six
 import warnings
 
-__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n', 'spawn_after', 'spawn_after_local', 'GreenThread']
+__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n',
+           'kill',
+           'spawn_after', 'spawn_after_local', 'GreenThread']
 
 getcurrent = greenlet.getcurrent
 
@@ -180,9 +182,9 @@ class GreenThread(greenlet.greenlet):
             def func(gt, [curried args/kwargs]):
 
         When the GreenThread finishes its run, it calls *func* with itself
-        and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied at link-time.  If the function wants
-        to retrieve the result of the GreenThread, it should call wait()
-        on its first argument.
+        and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied
+        at link-time.  If the function wants to retrieve the result of the GreenThread,
+        it should call wait() on its first argument.
 
         Note that *func* is called within execution context of
         the GreenThread, so it is possible to interfere with other linked
index 4662ec307c92f9015b774bba9e3a3d72a2473ea2..9f72c164d10c743b2f59304402337f01205c5ed4 100644 (file)
@@ -1,14 +1,7 @@
-import sys
 import os
-from eventlet.support import greenlets as greenlet, six
-from eventlet import patcher
 
-try:
-    # try and import pkg_resources ...
-    import pkg_resources
-except ImportError:
-    # ... but do not depend on it
-    pkg_resources = None
+from eventlet import patcher
+from eventlet.support import greenlets as greenlet, six
 
 
 __all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
@@ -86,6 +79,15 @@ def use_hub(mod=None):
                 mod = getattr(mod, classname)
         else:
             found = False
+
+            # setuptools 5.4.1 test_import_patched_defaults fail
+            # https://github.com/eventlet/eventlet/issues/177
+            try:
+                # try and import pkg_resources ...
+                import pkg_resources
+            except ImportError:
+                # ... but do not depend on it
+                pkg_resources = None
             if pkg_resources is not None:
                 for entry in pkg_resources.iter_entry_points(
                         group='eventlet.hubs', name=mod):
@@ -115,12 +117,13 @@ def get_hub():
         hub = _threadlocal.hub = _threadlocal.Hub()
     return hub
 
+
 from eventlet import timeout
 
 
 def trampoline(fd, read=None, write=None, timeout=None,
                timeout_exc=timeout.Timeout,
-               mark_as_closed = None):
+               mark_as_closed=None):
     """Suspend the current coroutine until the given socket object or file
     descriptor is ready to *read*, ready to *write*, or the specified
     *timeout* elapses, depending on arguments specified.
@@ -163,6 +166,7 @@ def trampoline(fd, read=None, write=None, timeout=None,
         if t is not None:
             t.cancel()
 
+
 def notify_close(fd):
     """
     A particular file descriptor has been explicitly closed. Register for any
@@ -171,6 +175,7 @@ def notify_close(fd):
     hub = get_hub()
     hub.notify_close(fd)
 
+
 def notify_opened(fd):
     """
     Some file descriptors may be closed 'silently' - that is, by the garbage
index c4906c5f4207d99fd33dfd23f82789024458ad5b..8dda018b50a580faeb1e0ab18d2f4192e927b36c 100644 (file)
@@ -172,8 +172,9 @@ class BaseHub(object):
                     "particular socket.  Consider using a pools.Pool. "
                     "If you do know what you're doing and want to disable "
                     "this error, call "
-                    "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; THAT THREAD=%s" % (
-                    evtype, fileno, evtype, cb, bucket[fileno]))
+                    "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
+                    "THAT THREAD=%s" % (
+                        evtype, fileno, evtype, cb, bucket[fileno]))
             # store off the second listener in another structure
             self.secondaries[evtype].setdefault(fileno, []).append(listener)
         else:
@@ -365,7 +366,8 @@ class BaseHub(object):
         if self.running:
             self.stopping = True
         if wait:
-            assert self.greenlet is not greenlet.getcurrent(), "Can't abort with wait from inside the hub's greenlet."
+            assert self.greenlet is not greenlet.getcurrent(
+            ), "Can't abort with wait from inside the hub's greenlet."
             # schedule an immediate timer just so the hub doesn't sleep
             self.schedule_call_global(0, lambda: None)
             # switch to it; when done the hub will switch back to its parent,
index 937818bc5e0cd85bce185e69cae9979f8c5a976e..a46c5715be989634032cc813b339b0adb0311be9 100644 (file)
@@ -88,7 +88,8 @@ class Hub(BaseHub):
     def abort(self, wait=True):
         self.schedule_call_global(0, self.greenlet.throw, greenlet.GreenletExit)
         if wait:
-            assert self.greenlet is not greenlet.getcurrent(), "Can't abort with wait from inside the hub's greenlet."
+            assert self.greenlet is not greenlet.getcurrent(
+            ), "Can't abort with wait from inside the hub's greenlet."
             self.switch()
 
     def _getrunning(self):
diff --git a/eventlet/eventlet/hubs/twistedr.py b/eventlet/eventlet/hubs/twistedr.py
deleted file mode 100644 (file)
index 1869621..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-import sys
-import threading
-from twisted.internet.base import DelayedCall as TwistedDelayedCall
-from eventlet.support import greenlets as greenlet
-from eventlet.hubs.hub import FdListener, READ, WRITE
-
-
-class DelayedCall(TwistedDelayedCall):
-    "fix DelayedCall to behave like eventlet's Timer in some respects"
-
-    def cancel(self):
-        if self.cancelled or self.called:
-            self.cancelled = True
-            return
-        return TwistedDelayedCall.cancel(self)
-
-
-class LocalDelayedCall(DelayedCall):
-
-    def __init__(self, *args, **kwargs):
-        self.greenlet = greenlet.getcurrent()
-        DelayedCall.__init__(self, *args, **kwargs)
-
-    def _get_cancelled(self):
-        if self.greenlet is None or self.greenlet.dead:
-            return True
-        return self.__dict__['cancelled']
-
-    def _set_cancelled(self, value):
-        self.__dict__['cancelled'] = value
-
-    cancelled = property(_get_cancelled, _set_cancelled)
-
-
-def callLater(DelayedCallClass, reactor, _seconds, _f, *args, **kw):
-    # the same as original but creates fixed DelayedCall instance
-    assert callable(_f), "%s is not callable" % _f
-    if not isinstance(_seconds, (int, long, float)):
-        raise TypeError("Seconds must be int, long, or float, was " + type(_seconds))
-    assert sys.maxint >= _seconds >= 0, \
-           "%s is not greater than or equal to 0 seconds" % (_seconds,)
-    tple = DelayedCallClass(reactor.seconds() + _seconds, _f, args, kw,
-                            reactor._cancelCallLater,
-                            reactor._moveCallLaterSooner,
-                            seconds=reactor.seconds)
-    reactor._newTimedCalls.append(tple)
-    return tple
-
-
-class socket_rwdescriptor(FdListener):
-    # implements(IReadWriteDescriptor)
-    def __init__(self, evtype, fileno, cb):
-        super(socket_rwdescriptor, self).__init__(evtype, fileno, cb)
-        if not isinstance(fileno, (int, long)):
-            raise TypeError("Expected int or long, got %s" % type(fileno))
-        # Twisted expects fileno to be a callable, not an attribute
-
-        def _fileno():
-            return fileno
-        self.fileno = _fileno
-
-    # required by glib2reactor
-    disconnected = False
-
-    def doRead(self):
-        if self.evtype is READ:
-            self.cb(self)
-
-    def doWrite(self):
-        if self.evtype == WRITE:
-            self.cb(self)
-
-    def connectionLost(self, reason):
-        self.disconnected = True
-        if self.cb:
-            self.cb(reason)
-        # trampoline() will now switch into the greenlet that owns the socket
-        # leaving the mainloop unscheduled. However, when the next switch
-        # to the mainloop occurs, twisted will not re-evaluate the delayed calls
-        # because it assumes that none were scheduled since no client code was executed
-        # (it has no idea it was switched away). So, we restart the mainloop.
-        # XXX this is not enough, pollreactor prints the traceback for
-        # this and epollreactor times out. see test__hub.TestCloseSocketWhilePolling
-        raise greenlet.GreenletExit
-
-    logstr = "twistedr"
-
-    def logPrefix(self):
-        return self.logstr
-
-
-class BaseTwistedHub(object):
-    """This hub does not run a dedicated greenlet for the mainloop (unlike TwistedHub).
-    Instead, it assumes that the mainloop is run in the main greenlet.
-
-    This makes running "green" functions in the main greenlet impossible but is useful
-    when you want to call reactor.run() yourself.
-    """
-
-    # XXX: remove me from here. make functions that depend on reactor
-    # XXX: hub's methods
-    uses_twisted_reactor = True
-
-    WRITE = WRITE
-    READ = READ
-
-    def __init__(self, mainloop_greenlet):
-        self.greenlet = mainloop_greenlet
-
-    def switch(self):
-        assert greenlet.getcurrent() is not self.greenlet, \
-               "Cannot switch from MAINLOOP to MAINLOOP"
-        try:
-           greenlet.getcurrent().parent = self.greenlet
-        except ValueError:
-           pass
-        return self.greenlet.switch()
-
-    def stop(self):
-        from twisted.internet import reactor
-        reactor.stop()
-
-    def add(self, evtype, fileno, cb):
-        from twisted.internet import reactor
-        descriptor = socket_rwdescriptor(evtype, fileno, cb)
-        if evtype is READ:
-            reactor.addReader(descriptor)
-        if evtype is WRITE:
-            reactor.addWriter(descriptor)
-        return descriptor
-
-    def remove(self, descriptor):
-        from twisted.internet import reactor
-        reactor.removeReader(descriptor)
-        reactor.removeWriter(descriptor)
-
-    def schedule_call_local(self, seconds, func, *args, **kwargs):
-        from twisted.internet import reactor
-
-        def call_if_greenlet_alive(*args1, **kwargs1):
-            if timer.greenlet.dead:
-                return
-            return func(*args1, **kwargs1)
-        timer = callLater(LocalDelayedCall, reactor, seconds,
-                          call_if_greenlet_alive, *args, **kwargs)
-        return timer
-
-    schedule_call = schedule_call_local
-
-    def schedule_call_global(self, seconds, func, *args, **kwargs):
-        from twisted.internet import reactor
-        return callLater(DelayedCall, reactor, seconds, func, *args, **kwargs)
-
-    def abort(self):
-        from twisted.internet import reactor
-        reactor.crash()
-
-    @property
-    def running(self):
-        from twisted.internet import reactor
-        return reactor.running
-
-    # for debugging:
-
-    def get_readers(self):
-        from twisted.internet import reactor
-        readers = reactor.getReaders()
-        readers.remove(getattr(reactor, 'waker'))
-        return readers
-
-    def get_writers(self):
-        from twisted.internet import reactor
-        return reactor.getWriters()
-
-    def get_timers_count(self):
-        from twisted.internet import reactor
-        return len(reactor.getDelayedCalls())
-
-
-class TwistedHub(BaseTwistedHub):
-    # wrapper around reactor that runs reactor's main loop in a separate greenlet.
-    # whenever you need to wait, i.e. inside a call that must appear
-    # blocking, call hub.switch() (then your blocking operation should switch back to you
-    # upon completion)
-
-    # unlike other eventlet hubs, which are created per-thread,
-    # this one cannot be instantiated more than once, because
-    # twisted doesn't allow that
-
-    # 0-not created
-    # 1-initialized but not started
-    # 2-started
-    # 3-restarted
-    state = 0
-
-    installSignalHandlers = False
-
-    def __init__(self):
-        assert Hub.state == 0, ('%s hub can only be instantiated once' % type(self).__name__,
-                              Hub.state)
-        Hub.state = 1
-        make_twisted_threadpool_daemonic() # otherwise the program
-                                        # would hang after the main
-                                        # greenlet exited
-        g = greenlet.greenlet(self.run)
-        BaseTwistedHub.__init__(self, g)
-
-    def switch(self):
-        assert greenlet.getcurrent() is not self.greenlet, \
-               "Cannot switch from MAINLOOP to MAINLOOP"
-        if self.greenlet.dead:
-            self.greenlet = greenlet.greenlet(self.run)
-        try:
-            greenlet.getcurrent().parent = self.greenlet
-        except ValueError:
-            pass
-        return self.greenlet.switch()
-
-    def run(self, installSignalHandlers=None):
-        if installSignalHandlers is None:
-            installSignalHandlers = self.installSignalHandlers
-
-        # main loop, executed in a dedicated greenlet
-        from twisted.internet import reactor
-        assert Hub.state in [1, 3], ('run function is not reentrant', Hub.state)
-
-        if Hub.state == 1:
-            reactor.startRunning(installSignalHandlers=installSignalHandlers)
-        elif not reactor.running:
-            # if we're here, then reactor was explicitly stopped with reactor.stop()
-            # restarting reactor (like we would do after an exception) in this case
-            # is not an option.
-            raise AssertionError("reactor is not running")
-
-        try:
-            self.mainLoop(reactor)
-        except:
-            # an exception in the mainLoop is a normal operation (e.g. user's
-            # signal handler could raise an exception). In this case we will re-enter
-            # the main loop at the next switch.
-            Hub.state = 3
-            raise
-
-        # clean exit here is needed for abort() method to work
-        # do not raise an exception here.
-
-    def mainLoop(self, reactor):
-        Hub.state = 2
-        # Unlike reactor's mainLoop, this function does not catch exceptions.
-        # Anything raised goes into the main greenlet (because it is always the
-        # parent of this one)
-        while reactor.running:
-            # Advance simulation time in delayed event processors.
-            reactor.runUntilCurrent()
-            t2 = reactor.timeout()
-            t = reactor.running and t2
-            reactor.doIteration(t)
-
-Hub = TwistedHub
-
-
-class DaemonicThread(threading.Thread):
-    def _set_daemon(self):
-        return True
-
-
-def make_twisted_threadpool_daemonic():
-    from twisted.python.threadpool import ThreadPool
-    if ThreadPool.threadFactory != DaemonicThread:
-        ThreadPool.threadFactory = DaemonicThread
index 39081ac569c75e0c9040f4317384bfbfbaf638e8..ea3189718129a8998828cf7902739388f95a8511 100644 (file)
@@ -47,6 +47,10 @@ def inject(module_name, new_globals, *additional_modules):
     that the already-imported modules in *additional_modules* are used when
     *module_name* makes its imports.
 
+    **Note:** This function does not create or change any sys.modules item, so
+    if your greened module uses code like 'sys.modules["your_module_name"]',
+    you need to update sys.modules yourself.
+
     *new_globals* is either None or a globals dictionary that gets populated
     with the contents of the *module_name* module.  This is useful when creating
     a "green" version of some other module.
@@ -70,7 +74,7 @@ def inject(module_name, new_globals, *additional_modules):
             _green_socket_modules() +
             _green_thread_modules() +
             _green_time_modules())
-            # _green_MySQLdb()) # enable this after a short baking-in period
+        # _green_MySQLdb()) # enable this after a short baking-in period
 
     # after this we are gonna screw with sys.modules, so capture the
     # state of all the modules we're going to mess with, and lock
@@ -283,6 +287,12 @@ def monkey_patch(**on):
     finally:
         imp.release_lock()
 
+    if sys.version_info >= (3, 3):
+        import importlib._bootstrap
+        thread = original('_thread')
+        # importlib must use real thread locks, not eventlet.Semaphore
+        importlib._bootstrap._thread = thread
+
 
 def is_monkey_patched(module):
     """Returns True if the given module is monkeypatched currently, False if
diff --git a/eventlet/eventlet/pool.py b/eventlet/eventlet/pool.py
deleted file mode 100644 (file)
index be9db8f..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-from __future__ import print_function
-
-from eventlet import coros, proc, api
-from eventlet.semaphore import Semaphore
-from eventlet.support import six
-
-import warnings
-warnings.warn(
-    "The pool module is deprecated.  Please use the "
-    "eventlet.GreenPool and eventlet.GreenPile classes instead.",
-    DeprecationWarning, stacklevel=2)
-
-
-class Pool(object):
-    def __init__(self, min_size=0, max_size=4, track_events=False):
-        if min_size > max_size:
-            raise ValueError('min_size cannot be bigger than max_size')
-        self.max_size = max_size
-        self.sem = Semaphore(max_size)
-        self.procs = proc.RunningProcSet()
-        if track_events:
-            self.results = coros.queue()
-        else:
-            self.results = None
-
-    def resize(self, new_max_size):
-        """ Change the :attr:`max_size` of the pool.
-
-        If the pool gets resized when there are more than *new_max_size*
-        coroutines checked out, when they are returned to the pool they will be
-        discarded.  The return value of :meth:`free` will be negative in this
-        situation.
-        """
-        max_size_delta = new_max_size - self.max_size
-        self.sem.counter += max_size_delta
-        self.max_size = new_max_size
-
-    @property
-    def current_size(self):
-        """ The number of coroutines that are currently executing jobs. """
-        return len(self.procs)
-
-    def free(self):
-        """ Returns the number of coroutines that are available for doing
-        work."""
-        return self.sem.counter
-
-    def execute(self, func, *args, **kwargs):
-        """Execute func in one of the coroutines maintained
-        by the pool, when one is free.
-
-        Immediately returns a :class:`~eventlet.proc.Proc` object which can be
-        queried for the func's result.
-
-        >>> pool = Pool()
-        >>> task = pool.execute(lambda a: ('foo', a), 1)
-        >>> task.wait()
-        ('foo', 1)
-        """
-        # if reentering an empty pool, don't try to wait on a coroutine freeing
-        # itself -- instead, just execute in the current coroutine
-        if self.sem.locked() and api.getcurrent() in self.procs:
-            p = proc.spawn(func, *args, **kwargs)
-            try:
-                p.wait()
-            except:
-                pass
-        else:
-            self.sem.acquire()
-            p = self.procs.spawn(func, *args, **kwargs)
-            # assuming the above line cannot raise
-            p.link(lambda p: self.sem.release())
-        if self.results is not None:
-            p.link(self.results)
-        return p
-
-    execute_async = execute
-
-    def _execute(self, evt, func, args, kw):
-        p = self.execute(func, *args, **kw)
-        p.link(evt)
-        return p
-
-    def waitall(self):
-        """ Calling this function blocks until every coroutine
-        completes its work (i.e. there are 0 running coroutines)."""
-        return self.procs.waitall()
-
-    wait_all = waitall
-
-    def wait(self):
-        """Wait for the next execute in the pool to complete,
-        and return the result."""
-        return self.results.wait()
-
-    def waiting(self):
-        """Return the number of coroutines waiting to execute.
-        """
-        if self.sem.balance < 0:
-            return -self.sem.balance
-        else:
-            return 0
-
-    def killall(self):
-        """ Kill every running coroutine as immediately as possible."""
-        return self.procs.killall()
-
-    def launch_all(self, function, iterable):
-        """For each tuple (sequence) in *iterable*, launch ``function(*tuple)``
-        in its own coroutine -- like ``itertools.starmap()``, but in parallel.
-        Discard values returned by ``function()``. You should call
-        ``wait_all()`` to wait for all coroutines, newly-launched plus any
-        previously-submitted :meth:`execute` or :meth:`execute_async` calls, to
-        complete.
-
-        >>> pool = Pool()
-        >>> def saw(x):
-        ...     print("I saw %s!" % x)
-        ...
-        >>> pool.launch_all(saw, "ABC")
-        >>> pool.wait_all()
-        I saw A!
-        I saw B!
-        I saw C!
-        """
-        for tup in iterable:
-            self.execute(function, *tup)
-
-    def process_all(self, function, iterable):
-        """For each tuple (sequence) in *iterable*, launch ``function(*tuple)``
-        in its own coroutine -- like ``itertools.starmap()``, but in parallel.
-        Discard values returned by ``function()``. Don't return until all
-        coroutines, newly-launched plus any previously-submitted :meth:`execute()`
-        or :meth:`execute_async` calls, have completed.
-
-        >>> from eventlet import coros
-        >>> pool = coros.CoroutinePool()
-        >>> def saw(x): print("I saw %s!" % x)
-        ...
-        >>> pool.process_all(saw, "DEF")
-        I saw D!
-        I saw E!
-        I saw F!
-        """
-        self.launch_all(function, iterable)
-        self.wait_all()
-
-    def generate_results(self, function, iterable, qsize=None):
-        """For each tuple (sequence) in *iterable*, launch ``function(*tuple)``
-        in its own coroutine -- like ``itertools.starmap()``, but in parallel.
-        Yield each of the values returned by ``function()``, in the order
-        they're completed rather than the order the coroutines were launched.
-
-        Iteration stops when we've yielded results for each arguments tuple in
-        *iterable*. Unlike :meth:`wait_all` and :meth:`process_all`, this
-        function does not wait for any previously-submitted :meth:`execute` or
-        :meth:`execute_async` calls.
-
-        Results are temporarily buffered in a queue. If you pass *qsize=*, this
-        value is used to limit the max size of the queue: an attempt to buffer
-        too many results will suspend the completed :class:`CoroutinePool`
-        coroutine until the requesting coroutine (the caller of
-        :meth:`generate_results`) has retrieved one or more results by calling
-        this generator-iterator's ``next()``.
-
-        If any coroutine raises an uncaught exception, that exception will
-        propagate to the requesting coroutine via the corresponding ``next()``
-        call.
-
-        What I particularly want these tests to illustrate is that using this
-        generator function::
-
-            for result in generate_results(function, iterable):
-                # ... do something with result ...
-                pass
-
-        executes coroutines at least as aggressively as the classic eventlet
-        idiom::
-
-            events = [pool.execute(function, *args) for args in iterable]
-            for event in events:
-                result = event.wait()
-                # ... do something with result ...
-
-        even without a distinct event object for every arg tuple in *iterable*,
-        and despite the funny flow control from interleaving launches of new
-        coroutines with yields of completed coroutines' results.
-
-        (The use case that makes this function preferable to the classic idiom
-        above is when the *iterable*, which may itself be a generator, produces
-        millions of items.)
-
-        >>> from eventlet import coros
-        >>> from eventlet.support import six
-        >>> import string
-        >>> pool = coros.CoroutinePool(max_size=5)
-        >>> pausers = [coros.Event() for x in range(2)]
-        >>> def longtask(evt, desc):
-        ...     print("%s woke up with %s" % (desc, evt.wait()))
-        ...
-        >>> pool.launch_all(longtask, zip(pausers, "AB"))
-        >>> def quicktask(desc):
-        ...     print("returning %s" % desc)
-        ...     return desc
-        ...
-
-        (Instead of using a ``for`` loop, step through :meth:`generate_results`
-        items individually to illustrate timing)
-
-        >>> step = iter(pool.generate_results(quicktask, string.ascii_lowercase))
-        >>> print(six.next(step))
-        returning a
-        returning b
-        returning c
-        a
-        >>> print(six.next(step))
-        b
-        >>> print(six.next(step))
-        c
-        >>> print(six.next(step))
-        returning d
-        returning e
-        returning f
-        d
-        >>> pausers[0].send("A")
-        >>> print(six.next(step))
-        e
-        >>> print(six.next(step))
-        f
-        >>> print(six.next(step))
-        A woke up with A
-        returning g
-        returning h
-        returning i
-        g
-        >>> print("".join([six.next(step) for x in range(3)]))
-        returning j
-        returning k
-        returning l
-        returning m
-        hij
-        >>> pausers[1].send("B")
-        >>> print("".join([six.next(step) for x in range(4)]))
-        B woke up with B
-        returning n
-        returning o
-        returning p
-        returning q
-        klmn
-        """
-        # Get an iterator because of our funny nested loop below. Wrap the
-        # iterable in enumerate() so we count items that come through.
-        tuples = iter(enumerate(iterable))
-        # If the iterable is empty, this whole function is a no-op, and we can
-        # save ourselves some grief by just quitting out. In particular, once
-        # we enter the outer loop below, we're going to wait on the queue --
-        # but if we launched no coroutines with that queue as the destination,
-        # we could end up waiting a very long time.
-        try:
-            index, args = six.next(tuples)
-        except StopIteration:
-            return
-        # From this point forward, 'args' is the current arguments tuple and
-        # 'index+1' counts how many such tuples we've seen.
-        # This implementation relies on the fact that _execute() accepts an
-        # event-like object, and -- unless it's None -- the completed
-        # coroutine calls send(result). We slyly pass a queue rather than an
-        # event -- the same queue instance for all coroutines. This is why our
-        # queue interface intentionally resembles the event interface.
-        q = coros.queue(max_size=qsize)
-        # How many results have we yielded so far?
-        finished = 0
-        # This first loop is only until we've launched all the coroutines. Its
-        # complexity is because if iterable contains more args tuples than the
-        # size of our pool, attempting to _execute() the (poolsize+1)th
-        # coroutine would suspend until something completes and send()s its
-        # result to our queue. But to keep down queue overhead and to maximize
-        # responsiveness to our caller, we'd rather suspend on reading the
-        # queue. So we stuff the pool as full as we can, then wait for
-        # something to finish, then stuff more coroutines into the pool.
-        try:
-            while True:
-                # Before each yield, start as many new coroutines as we can fit.
-                # (The self.free() test isn't 100% accurate: if we happen to be
-                # executing in one of the pool's coroutines, we could _execute()
-                # without waiting even if self.free() reports 0. See _execute().)
-                # The point is that we don't want to wait in the _execute() call,
-                # we want to wait in the q.wait() call.
-                # IMPORTANT: at start, and whenever we've caught up with all
-                # coroutines we've launched so far, we MUST iterate this inner
-                # loop at least once, regardless of self.free() -- otherwise the
-                # q.wait() call below will deadlock!
-                # Recall that index is the index of the NEXT args tuple that we
-                # haven't yet launched. Therefore it counts how many args tuples
-                # we've launched so far.
-                while self.free() > 0 or finished == index:
-                    # Just like the implementation of execute_async(), save that
-                    # we're passing our queue instead of None as the "event" to
-                    # which to send() the result.
-                    self._execute(q, function, args, {})
-                    # We've consumed that args tuple, advance to next.
-                    index, args = six.next(tuples)
-                # Okay, we've filled up the pool again, yield a result -- which
-                # will probably wait for a coroutine to complete. Although we do
-                # have q.ready(), so we could iterate without waiting, we avoid
-                # that because every yield could involve considerable real time.
-                # We don't know how long it takes to return from yield, so every
-                # time we do, take the opportunity to stuff more requests into the
-                # pool before yielding again.
-                yield q.wait()
-                # Be sure to count results so we know when to stop!
-                finished += 1
-        except StopIteration:
-            pass
-        # Here we've exhausted the input iterable. index+1 is the total number
-        # of coroutines we've launched. We probably haven't yielded that many
-        # results yet. Wait for the rest of the results, yielding them as they
-        # arrive.
-        while finished < index + 1:
-            yield q.wait()
-            finished += 1
diff --git a/eventlet/eventlet/proc.py b/eventlet/eventlet/proc.py
deleted file mode 100644 (file)
index 52e98d8..0000000
+++ /dev/null
@@ -1,739 +0,0 @@
-"""
-This module provides means to spawn, kill and link coroutines. Linking means
-subscribing to the coroutine's result, either in form of return value or
-unhandled exception.
-
-To create a linkable coroutine use spawn function provided by this module:
-
-    >>> def demofunc(x, y):
-    ...    return x / y
-    >>> p = spawn(demofunc, 6, 2)
-
-The return value of :func:`spawn` is an instance of :class:`Proc` class that
-you can "link":
-
- * ``p.link(obj)`` - notify *obj* when the coroutine is finished
-
-What "notify" means here depends on the type of *obj*: a callable is simply
-called, an :class:`~eventlet.coros.Event` or a :class:`~eventlet.coros.queue`
-is notified using ``send``/``send_exception`` methods and if *obj* is another
-greenlet it's killed with :class:`LinkedExited` exception.
-
-Here's an example:
-
->>> event = coros.Event()
->>> _ = p.link(event)
->>> event.wait()
-3
-
-Now, even though *p* is finished it's still possible to link it. In this
-case the notification is performed immediatelly:
-
->>> try:
-...     p.link()
-... except LinkedCompleted:
-...     print('LinkedCompleted')
-LinkedCompleted
-
-(Without an argument, the link is created to the current greenlet)
-
-There are also :meth:`~eventlet.proc.Source.link_value` and
-:func:`link_exception` methods that only deliver a return value and an
-unhandled exception respectively (plain :meth:`~eventlet.proc.Source.link`
-delivers both).  Suppose we want to spawn a greenlet to do an important part of
-the task; if it fails then there's no way to complete the task so the parent
-must fail as well; :meth:`~eventlet.proc.Source.link_exception` is useful here:
-
->>> p = spawn(demofunc, 1, 0)
->>> _ = p.link_exception()
->>> try:
-...     api.sleep(1)
-... except LinkedFailed:
-...     print('LinkedFailed')
-LinkedFailed
-
-One application of linking is :func:`waitall` function: link to a bunch of
-coroutines and wait for all them to complete. Such a function is provided by
-this module.
-"""
-import sys
-
-from eventlet import api, coros, hubs
-from eventlet.support import six
-
-import warnings
-warnings.warn(
-    "The proc module is deprecated!  Please use the greenthread "
-    "module, or any of the many other Eventlet cross-coroutine "
-    "primitives, instead.",
-    DeprecationWarning, stacklevel=2)
-
-__all__ = ['LinkedExited',
-           'LinkedFailed',
-           'LinkedCompleted',
-           'LinkedKilled',
-           'ProcExit',
-           'Link',
-           'waitall',
-           'killall',
-           'Source',
-           'Proc',
-           'spawn',
-           'spawn_link',
-           'spawn_link_value',
-           'spawn_link_exception']
-
-
-class LinkedExited(Exception):
-    """Raised when a linked proc exits"""
-    msg = "%r exited"
-
-    def __init__(self, name=None, msg=None):
-        self.name = name
-        if msg is None:
-            msg = self.msg % self.name
-        Exception.__init__(self, msg)
-
-
-class LinkedCompleted(LinkedExited):
-    """Raised when a linked proc finishes the execution cleanly"""
-
-    msg = "%r completed successfully"
-
-
-class LinkedFailed(LinkedExited):
-    """Raised when a linked proc dies because of unhandled exception"""
-    msg = "%r failed with %s"
-
-    def __init__(self, name, typ, value=None, tb=None):
-        msg = self.msg % (name, typ.__name__)
-        LinkedExited.__init__(self, name, msg)
-
-
-class LinkedKilled(LinkedFailed):
-    """Raised when a linked proc dies because of unhandled GreenletExit
-    (i.e. it was killed)
-    """
-    msg = """%r was killed with %s"""
-
-
-def getLinkedFailed(name, typ, value=None, tb=None):
-    if issubclass(typ, api.GreenletExit):
-        return LinkedKilled(name, typ, value, tb)
-    return LinkedFailed(name, typ, value, tb)
-
-
-class ProcExit(api.GreenletExit):
-    """Raised when this proc is killed."""
-
-
-class Link(object):
-    """
-    A link to a greenlet, triggered when the greenlet exits.
-    """
-
-    def __init__(self, listener):
-        self.listener = listener
-
-    def cancel(self):
-        self.listener = None
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, *args):
-        self.cancel()
-
-
-class LinkToEvent(Link):
-
-    def __call__(self, source):
-        if self.listener is None:
-            return
-        if source.has_value():
-            self.listener.send(source.value)
-        else:
-            self.listener.send_exception(*source.exc_info())
-
-
-class LinkToGreenlet(Link):
-
-    def __call__(self, source):
-        if source.has_value():
-            self.listener.throw(LinkedCompleted(source.name))
-        else:
-            self.listener.throw(getLinkedFailed(source.name, *source.exc_info()))
-
-
-class LinkToCallable(Link):
-
-    def __call__(self, source):
-        self.listener(source)
-
-
-def waitall(lst, trap_errors=False, queue=None):
-    if queue is None:
-        queue = coros.queue()
-    index = -1
-    for (index, linkable) in enumerate(lst):
-        linkable.link(decorate_send(queue, index))
-    len = index + 1
-    results = [None] * len
-    count = 0
-    while count < len:
-        try:
-            index, value = queue.wait()
-        except Exception:
-            if not trap_errors:
-                raise
-        else:
-            results[index] = value
-        count += 1
-    return results
-
-
-class decorate_send(object):
-
-    def __init__(self, event, tag):
-        self._event = event
-        self._tag = tag
-
-    def __repr__(self):
-        params = (type(self).__name__, self._tag, self._event)
-        return '<%s tag=%r event=%r>' % params
-
-    def __getattr__(self, name):
-        assert name != '_event'
-        return getattr(self._event, name)
-
-    def send(self, value):
-        self._event.send((self._tag, value))
-
-
-def killall(procs, *throw_args, **kwargs):
-    if not throw_args:
-        throw_args = (ProcExit, )
-    wait = kwargs.pop('wait', False)
-    if kwargs:
-        raise TypeError('Invalid keyword argument for proc.killall(): %s' % ', '.join(kwargs.keys()))
-    for g in procs:
-        if not g.dead:
-            hubs.get_hub().schedule_call_global(0, g.throw, *throw_args)
-    if wait and api.getcurrent() is not hubs.get_hub().greenlet:
-        api.sleep(0)
-
-
-class NotUsed(object):
-
-    def __str__(self):
-        return '<Source instance does not hold a value or an exception>'
-
-    __repr__ = __str__
-
-_NOT_USED = NotUsed()
-
-
-def spawn_greenlet(function, *args):
-    """Create a new greenlet that will run ``function(*args)``.
-    The current greenlet won't be unscheduled. Keyword arguments aren't
-    supported (limitation of greenlet), use :func:`spawn` to work around that.
-    """
-    g = api.Greenlet(function)
-    g.parent = hubs.get_hub().greenlet
-    hubs.get_hub().schedule_call_global(0, g.switch, *args)
-    return g
-
-
-class Source(object):
-    """Maintain a set of links to the listeners. Delegate the sent value or
-    the exception to all of them.
-
-    To set up a link, use :meth:`link_value`, :meth:`link_exception` or
-    :meth:`link` method. The latter establishes both "value" and "exception"
-    link. It is possible to link to events, queues, greenlets and callables.
-
-    >>> source = Source()
-    >>> event = coros.Event()
-    >>> _ = source.link(event)
-
-    Once source's :meth:`send` or :meth:`send_exception` method is called, all
-    the listeners with the right type of link will be notified ("right type"
-    means that exceptions won't be delivered to "value" links and values won't
-    be delivered to "exception" links). Once link has been fired it is removed.
-
-    Notifying listeners is performed in the **mainloop** greenlet. Under the
-    hood notifying a link means executing a callback, see :class:`Link` class
-    for details. Notification *must not* attempt to switch to the hub, i.e.
-    call any blocking functions.
-
-    >>> source.send('hello')
-    >>> event.wait()
-    'hello'
-
-    Any error happened while sending will be logged as a regular unhandled
-    exception. This won't prevent other links from being fired.
-
-    There 3 kinds of listeners supported:
-
-     1. If *listener* is a greenlet (regardless if it's a raw greenlet or an
-        extension like :class:`Proc`), a subclass of :class:`LinkedExited`
-        exception is raised in it.
-
-     2. If *listener* is something with send/send_exception methods (event,
-        queue, :class:`Source` but not :class:`Proc`) the relevant method is
-        called.
-
-     3. If *listener* is a callable, it is called with 1 argument (the result)
-        for "value" links and with 3 arguments ``(typ, value, tb)`` for
-        "exception" links.
-    """
-
-    def __init__(self, name=None):
-        self.name = name
-        self._value_links = {}
-        self._exception_links = {}
-        self.value = _NOT_USED
-        self._exc = None
-
-    def _repr_helper(self):
-        result = []
-        result.append(repr(self.name))
-        if self.value is not _NOT_USED:
-            if self._exc is None:
-                res = repr(self.value)
-                if len(res) > 50:
-                    res = res[:50] + '...'
-                result.append('result=%s' % res)
-            else:
-                result.append('raised=%s' % (self._exc, ))
-        result.append('{%s:%s}' % (len(self._value_links), len(self._exception_links)))
-        return result
-
-    def __repr__(self):
-        klass = type(self).__name__
-        return '<%s at %s %s>' % (klass, hex(id(self)), ' '.join(self._repr_helper()))
-
-    def ready(self):
-        return self.value is not _NOT_USED
-
-    def has_value(self):
-        return self.value is not _NOT_USED and self._exc is None
-
-    def has_exception(self):
-        return self.value is not _NOT_USED and self._exc is not None
-
-    def exc_info(self):
-        if not self._exc:
-            return (None, None, None)
-        elif len(self._exc) == 3:
-            return self._exc
-        elif len(self._exc) == 1:
-            if isinstance(self._exc[0], type):
-                return self._exc[0], None, None
-            else:
-                return self._exc[0].__class__, self._exc[0], None
-        elif len(self._exc) == 2:
-            return self._exc[0], self._exc[1], None
-        else:
-            return self._exc
-
-    def link_value(self, listener=None, link=None):
-        if self.ready() and self._exc is not None:
-            return
-        if listener is None:
-            listener = api.getcurrent()
-        if link is None:
-            link = self.getLink(listener)
-        if self.ready() and listener is api.getcurrent():
-            link(self)
-        else:
-            self._value_links[listener] = link
-            if self.value is not _NOT_USED:
-                self._start_send()
-        return link
-
-    def link_exception(self, listener=None, link=None):
-        if self.value is not _NOT_USED and self._exc is None:
-            return
-        if listener is None:
-            listener = api.getcurrent()
-        if link is None:
-            link = self.getLink(listener)
-        if self.ready() and listener is api.getcurrent():
-            link(self)
-        else:
-            self._exception_links[listener] = link
-            if self.value is not _NOT_USED:
-                self._start_send_exception()
-        return link
-
-    def link(self, listener=None, link=None):
-        if listener is None:
-            listener = api.getcurrent()
-        if link is None:
-            link = self.getLink(listener)
-        if self.ready() and listener is api.getcurrent():
-            if self._exc is None:
-                link(self)
-            else:
-                link(self)
-        else:
-            self._value_links[listener] = link
-            self._exception_links[listener] = link
-            if self.value is not _NOT_USED:
-                if self._exc is None:
-                    self._start_send()
-                else:
-                    self._start_send_exception()
-        return link
-
-    def unlink(self, listener=None):
-        if listener is None:
-            listener = api.getcurrent()
-        self._value_links.pop(listener, None)
-        self._exception_links.pop(listener, None)
-
-    @staticmethod
-    def getLink(listener):
-        if hasattr(listener, 'throw'):
-            return LinkToGreenlet(listener)
-        if hasattr(listener, 'send'):
-            return LinkToEvent(listener)
-        elif hasattr(listener, '__call__'):
-            return LinkToCallable(listener)
-        else:
-            raise TypeError("Don't know how to link to %r" % (listener, ))
-
-    def send(self, value):
-        assert not self.ready(), "%s has been fired already" % self
-        self.value = value
-        self._exc = None
-        self._start_send()
-
-    def _start_send(self):
-        links_items = list(six.iteritems(self._value_links))
-        hubs.get_hub().schedule_call_global(0, self._do_send, links_items, self._value_links)
-
-    def send_exception(self, *throw_args):
-        assert not self.ready(), "%s has been fired already" % self
-        self.value = None
-        self._exc = throw_args
-        self._start_send_exception()
-
-    def _start_send_exception(self):
-        links_items = list(six.iteritems(self._exception_links))
-        hubs.get_hub().schedule_call_global(0, self._do_send, links_items, self._exception_links)
-
-    def _do_send(self, links, consult):
-        while links:
-            listener, link = links.pop()
-            try:
-                if listener in consult:
-                    try:
-                        link(self)
-                    finally:
-                        consult.pop(listener, None)
-            except:
-                hubs.get_hub().schedule_call_global(0, self._do_send, links, consult)
-                raise
-
-    def wait(self, timeout=None, *throw_args):
-        """Wait until :meth:`send` or :meth:`send_exception` is called or
-        *timeout* has expired. Return the argument of :meth:`send` or raise the
-        argument of :meth:`send_exception`. If *timeout* has expired, ``None``
-        is returned.
-
-        The arguments, when provided, specify how many seconds to wait and what
-        to do when *timeout* has expired. They are treated the same way as
-        :func:`~eventlet.api.timeout` treats them.
-        """
-        if self.value is not _NOT_USED:
-            if self._exc is None:
-                return self.value
-            else:
-                api.getcurrent().throw(*self._exc)
-        if timeout is not None:
-            timer = api.timeout(timeout, *throw_args)
-            timer.__enter__()
-            if timeout == 0:
-                if timer.__exit__(None, None, None):
-                    return
-                else:
-                    try:
-                        api.getcurrent().throw(*timer.throw_args)
-                    except:
-                        if not timer.__exit__(*sys.exc_info()):
-                            raise
-                    return
-            EXC = True
-        try:
-            try:
-                waiter = Waiter()
-                self.link(waiter)
-                try:
-                    return waiter.wait()
-                finally:
-                    self.unlink(waiter)
-            except:
-                EXC = False
-                if timeout is None or not timer.__exit__(*sys.exc_info()):
-                    raise
-        finally:
-            if timeout is not None and EXC:
-                timer.__exit__(None, None, None)
-
-
-class Waiter(object):
-
-    def __init__(self):
-        self.greenlet = None
-
-    def send(self, value):
-        """Wake up the greenlet that is calling wait() currently (if there is one).
-        Can only be called from get_hub().greenlet.
-        """
-        assert api.getcurrent() is hubs.get_hub().greenlet
-        if self.greenlet is not None:
-            self.greenlet.switch(value)
-
-    def send_exception(self, *throw_args):
-        """Make greenlet calling wait() wake up (if there is a wait()).
-        Can only be called from get_hub().greenlet.
-        """
-        assert api.getcurrent() is hubs.get_hub().greenlet
-        if self.greenlet is not None:
-            self.greenlet.throw(*throw_args)
-
-    def wait(self):
-        """Wait until send or send_exception is called. Return value passed
-        into send() or raise exception passed into send_exception().
-        """
-        assert self.greenlet is None
-        current = api.getcurrent()
-        assert current is not hubs.get_hub().greenlet
-        self.greenlet = current
-        try:
-            return hubs.get_hub().switch()
-        finally:
-            self.greenlet = None
-
-
-class Proc(Source):
-    """A linkable coroutine based on Source.
-    Upon completion, delivers coroutine's result to the listeners.
-    """
-
-    def __init__(self, name=None):
-        self.greenlet = None
-        Source.__init__(self, name)
-
-    def _repr_helper(self):
-        if self.greenlet is not None and self.greenlet.dead:
-            dead = '(dead)'
-        else:
-            dead = ''
-        return ['%r%s' % (self.greenlet, dead)] + Source._repr_helper(self)
-
-    def __repr__(self):
-        klass = type(self).__name__
-        return '<%s %s>' % (klass, ' '.join(self._repr_helper()))
-
-    def __nonzero__(self):
-        if self.ready():
-            # with current _run this does not makes any difference
-            # still, let keep it there
-            return False
-        # otherwise bool(proc) is the same as bool(greenlet)
-        if self.greenlet is not None:
-            return bool(self.greenlet)
-
-    __bool__ = __nonzero__
-
-    @property
-    def dead(self):
-        return self.ready() or self.greenlet.dead
-
-    @classmethod
-    def spawn(cls, function, *args, **kwargs):
-        """Return a new :class:`Proc` instance that is scheduled to execute
-        ``function(*args, **kwargs)`` upon the next hub iteration.
-        """
-        proc = cls()
-        proc.run(function, *args, **kwargs)
-        return proc
-
-    def run(self, function, *args, **kwargs):
-        """Create a new greenlet to execute ``function(*args, **kwargs)``.
-        The created greenlet is scheduled to run upon the next hub iteration.
-        """
-        assert self.greenlet is None, "'run' can only be called once per instance"
-        if self.name is None:
-            self.name = str(function)
-        self.greenlet = spawn_greenlet(self._run, function, args, kwargs)
-
-    def _run(self, function, args, kwargs):
-        """Internal top level function.
-        Execute *function* and send its result to the listeners.
-        """
-        try:
-            result = function(*args, **kwargs)
-        except:
-            self.send_exception(*sys.exc_info())
-            raise  # let mainloop log the exception
-        else:
-            self.send(result)
-
-    def throw(self, *throw_args):
-        """Used internally to raise the exception.
-
-        Behaves exactly like greenlet's 'throw' with the exception that
-        :class:`ProcExit` is raised by default. Do not use this function as it
-        leaves the current greenlet unscheduled forever. Use :meth:`kill`
-        method instead.
-        """
-        if not self.dead:
-            if not throw_args:
-                throw_args = (ProcExit, )
-            self.greenlet.throw(*throw_args)
-
-    def kill(self, *throw_args):
-        """
-        Raise an exception in the greenlet. Unschedule the current greenlet so
-        that this :class:`Proc` can handle the exception (or die).
-
-        The exception can be specified with *throw_args*. By default,
-        :class:`ProcExit` is raised.
-        """
-        if not self.dead:
-            if not throw_args:
-                throw_args = (ProcExit, )
-            hubs.get_hub().schedule_call_global(0, self.greenlet.throw, *throw_args)
-            if api.getcurrent() is not hubs.get_hub().greenlet:
-                api.sleep(0)
-
-    # QQQ maybe Proc should not inherit from Source (because its send() and send_exception()
-    # QQQ methods are for internal use only)
-
-
-spawn = Proc.spawn
-
-
-def spawn_link(function, *args, **kwargs):
-    p = spawn(function, *args, **kwargs)
-    p.link()
-    return p
-
-
-def spawn_link_value(function, *args, **kwargs):
-    p = spawn(function, *args, **kwargs)
-    p.link_value()
-    return p
-
-
-def spawn_link_exception(function, *args, **kwargs):
-    p = spawn(function, *args, **kwargs)
-    p.link_exception()
-    return p
-
-
-class wrap_errors(object):
-    """Helper to make function return an exception, rather than raise it.
-
-    Because every exception that is unhandled by greenlet will be logged by the hub,
-    it is desirable to prevent non-error exceptions from leaving a greenlet.
-    This can done with simple try/except construct:
-
-    def func1(*args, **kwargs):
-        try:
-            return func(*args, **kwargs)
-        except (A, B, C) as ex:
-            return ex
-
-    wrap_errors provides a shortcut to write that in one line:
-
-    func1 = wrap_errors((A, B, C), func)
-
-    It also preserves __str__ and __repr__ of the original function.
-    """
-
-    def __init__(self, errors, func):
-        """Make a new function from `func', such that it catches `errors' (an
-        Exception subclass, or a tuple of Exception subclasses) and return
-        it as a value.
-        """
-        self.errors = errors
-        self.func = func
-
-    def __call__(self, *args, **kwargs):
-        try:
-            return self.func(*args, **kwargs)
-        except self.errors as ex:
-            return ex
-
-    def __str__(self):
-        return str(self.func)
-
-    def __repr__(self):
-        return repr(self.func)
-
-    def __getattr__(self, item):
-        return getattr(self.func, item)
-
-
-class RunningProcSet(object):
-    """
-    Maintain a set of :class:`Proc` s that are still running, that is,
-    automatically remove a proc when it's finished. Provide a way to wait/kill
-    all of them
-    """
-
-    def __init__(self, *args):
-        self.procs = set(*args)
-        if args:
-            for p in self.args[0]:
-                p.link(lambda p: self.procs.discard(p))
-
-    def __len__(self):
-        return len(self.procs)
-
-    def __contains__(self, item):
-        if isinstance(item, api.Greenlet):
-            # special case for "api.getcurrent() in running_proc_set" to work
-            for x in self.procs:
-                if x.greenlet == item:
-                    return True
-        else:
-            return item in self.procs
-
-    def __iter__(self):
-        return iter(self.procs)
-
-    def add(self, p):
-        self.procs.add(p)
-        p.link(lambda p: self.procs.discard(p))
-
-    def spawn(self, func, *args, **kwargs):
-        p = spawn(func, *args, **kwargs)
-        self.add(p)
-        return p
-
-    def waitall(self, trap_errors=False):
-        while self.procs:
-            waitall(self.procs, trap_errors=trap_errors)
-
-    def killall(self, *throw_args, **kwargs):
-        return killall(self.procs, *throw_args, **kwargs)
-
-
-class Pool(object):
-
-    linkable_class = Proc
-
-    def __init__(self, limit):
-        self.semaphore = coros.Semaphore(limit)
-
-    def allocate(self):
-        self.semaphore.acquire()
-        g = self.linkable_class()
-        g.link(lambda *_args: self.semaphore.release())
-        return g
diff --git a/eventlet/eventlet/processes.py b/eventlet/eventlet/processes.py
deleted file mode 100644 (file)
index 1b5dfd6..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-import warnings
-warnings.warn("eventlet.processes is deprecated in favor of "
-              "eventlet.green.subprocess, which is API-compatible with the standard "
-              " library subprocess module.",
-              DeprecationWarning, stacklevel=2)
-
-import errno
-import os
-import signal
-
-import eventlet
-from eventlet import greenio, pools
-from eventlet.green import subprocess
-
-
-class DeadProcess(RuntimeError):
-    pass
-
-
-def cooperative_wait(pobj, check_interval=0.01):
-    """ Waits for a child process to exit, returning the status
-    code.
-
-    Unlike ``os.wait``, :func:`cooperative_wait` does not block the entire
-    process, only the calling coroutine.  If the child process does not die,
-    :func:`cooperative_wait` could wait forever.
-
-    The argument *check_interval* is the amount of time, in seconds, that
-    :func:`cooperative_wait` will sleep between calls to ``os.waitpid``.
-    """
-    try:
-        while True:
-            status = pobj.poll()
-            if status >= 0:
-                return status
-            eventlet.sleep(check_interval)
-    except OSError as e:
-        if e.errno == errno.ECHILD:
-            # no child process, this happens if the child process
-            # already died and has been cleaned up, or if you just
-            # called with a random pid value
-            return -1
-        else:
-            raise
-
-
-class Process(object):
-    """Construct Process objects, then call read, and write on them."""
-    process_number = 0
-
-    def __init__(self, command, args, dead_callback=None):
-        self.process_number = self.process_number + 1
-        Process.process_number = self.process_number
-        self.command = command
-        self.args = args
-        self._dead_callback = dead_callback
-        self.run()
-
-    def run(self):
-        self.dead = False
-        self.started = False
-        self.proc = None
-
-        args = [self.command]
-        args.extend(self.args)
-        self.proc = subprocess.Popen(
-            args=args,
-            shell=False,
-            stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-            close_fds=True,
-        )
-        self.child_stdout_stderr = self.proc.stdout
-        self.child_stdin = self.proc.stdin
-
-        self.sendall = self.child_stdin.write
-        self.send = self.child_stdin.write
-        self.recv = self.child_stdout_stderr.read
-        self.readline = self.child_stdout_stderr.readline
-        self._read_first_result = False
-
-    def wait(self):
-        return cooperative_wait(self.proc)
-
-    def dead_callback(self):
-        self.wait()
-        self.dead = True
-        if self._dead_callback:
-            self._dead_callback()
-
-    def makefile(self, mode, *arg):
-        if mode.startswith('r'):
-            return self.child_stdout_stderr
-        if mode.startswith('w'):
-            return self.child_stdin
-        raise RuntimeError("Unknown mode", mode)
-
-    def read(self, amount=None):
-        """Reads from the stdout and stderr of the child process.
-        The first call to read() will return a string; subsequent
-        calls may raise a DeadProcess when EOF occurs on the pipe.
-        """
-        result = self.child_stdout_stderr.read(amount)
-        if result == '' and self._read_first_result:
-            # This process is dead.
-            self.dead_callback()
-            raise DeadProcess
-        else:
-            self._read_first_result = True
-        return result
-
-    def write(self, stuff):
-        written = 0
-        try:
-            written = self.child_stdin.write(stuff)
-            self.child_stdin.flush()
-        except ValueError as e:
-            # File was closed
-            assert str(e) == 'I/O operation on closed file'
-        if written == 0:
-            self.dead_callback()
-            raise DeadProcess
-
-    def flush(self):
-        self.child_stdin.flush()
-
-    def close(self):
-        self.child_stdout_stderr.close()
-        self.child_stdin.close()
-        self.dead_callback()
-
-    def close_stdin(self):
-        self.child_stdin.close()
-
-    def kill(self, sig=None):
-        if sig is None:
-            sig = signal.SIGTERM
-        pid = self.getpid()
-        os.kill(pid, sig)
-
-    def getpid(self):
-        return self.proc.pid
-
-
-class ProcessPool(pools.Pool):
-    def __init__(self, command, args=None, min_size=0, max_size=4):
-        """*command*
-            the command to run
-        """
-        self.command = command
-        if args is None:
-            args = []
-        self.args = args
-        pools.Pool.__init__(self, min_size, max_size)
-
-    def create(self):
-        """Generate a process
-        """
-        def dead_callback():
-            self.current_size -= 1
-        return Process(self.command, self.args, dead_callback)
-
-    def put(self, item):
-        if not item.dead:
-            if item.proc.poll() != -1:
-                item.dead_callback()
-            else:
-                pools.Pool.put(self, item)
index f71700e3179565aa0840be71453d64b75aaf1335..5a82238193670c5f9b2384b7a9075cdaf72c0ef9 100644 (file)
@@ -82,7 +82,9 @@ class Waiter(object):
             waiting = ' waiting'
         else:
             waiting = ''
-        return '<%s at %s%s greenlet=%r>' % (type(self).__name__, hex(id(self)), waiting, self.greenlet)
+        return '<%s at %s%s greenlet=%r>' % (
+            type(self).__name__, hex(id(self)), waiting, self.greenlet,
+        )
 
     def __str__(self):
         """
@@ -108,7 +110,8 @@ class Waiter(object):
         """Wake up the greenlet that is calling wait() currently (if there is one).
         Can only be called from Hub's greenlet.
         """
-        assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
+        assert getcurrent() is get_hub(
+        ).greenlet, "Can only use Waiter.switch method from the mainloop"
         if self.greenlet is not None:
             try:
                 self.greenlet.switch(value)
@@ -119,7 +122,8 @@ class Waiter(object):
         """Make greenlet calling wait() wake up (if there is a wait()).
         Can only be called from Hub's greenlet.
         """
-        assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
+        assert getcurrent() is get_hub(
+        ).greenlet, "Can only use Waiter.switch method from the mainloop"
         if self.greenlet is not None:
             try:
                 self.greenlet.throw(*throw_args)
@@ -193,7 +197,8 @@ class LightQueue(object):
         """Resizes the queue's maximum size.
 
         If the size is increased, and there are putters waiting, they may be woken up."""
-        if self.maxsize is not None and (size is None or size > self.maxsize):  # None is not comparable in 3.x
+        # None is not comparable in 3.x
+        if self.maxsize is not None and (size is None or size > self.maxsize):
             # Maybe wake some stuff up
             self._schedule_unlock()
         self.maxsize = size
@@ -217,7 +222,8 @@ class LightQueue(object):
 
         ``Queue(None)`` is never full.
         """
-        return self.maxsize is not None and self.qsize() >= self.maxsize  # None is not comparable in 3.x
+        # None is not comparable in 3.x
+        return self.maxsize is not None and self.qsize() >= self.maxsize
 
     def put(self, item, block=True, timeout=None):
         """Put an item into the queue.
@@ -335,14 +341,17 @@ class LightQueue(object):
                         getter = self.getters.pop()
                         if getter:
                             item = putter.item
-                            putter.item = _NONE  # this makes greenlet calling put() not to call _put() again
+                            # this makes greenlet calling put() not to call _put() again
+                            putter.item = _NONE
                             self._put(item)
                             item = self._get()
                             getter.switch(item)
                             putter.switch(putter)
                         else:
                             self.putters.add(putter)
-                elif self.putters and (self.getters or self.maxsize is None or self.qsize() < self.maxsize):
+                elif self.putters and (self.getters or
+                                       self.maxsize is None or
+                                       self.qsize() < self.maxsize):
                     putter = self.putters.pop()
                     putter.switch(putter)
                 else:
@@ -401,8 +410,8 @@ class Queue(LightQueue):
 
     def task_done(self):
         '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
-        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
-        that the processing on the task is complete.
+        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to
+        :meth:`task_done` tells the queue that the processing on the task is complete.
 
         If a :meth:`join` is currently blocking, it will resume when all items have been processed
         (meaning that a :meth:`task_done` call was received for every item that had been
index 65e79b6780b6349b465995cb65f77d598b2d27dc..4c2b75d3c716ffdb57ebbb581b57d6067889347d 100644 (file)
@@ -1,6 +1,7 @@
 import sys
+from contextlib import contextmanager
 
-from eventlet.support import greenlets
+from eventlet.support import greenlets, six
 
 
 def get_errno(exc):
@@ -32,3 +33,23 @@ else:
         Exception information is not visible outside of except statements.
         sys.exc_clear became obsolete and removed."""
         pass
+
+if sys.version_info[0] < 3:
+    def bytes_to_str(b, encoding='ascii'):
+        return b
+else:
+    def bytes_to_str(b, encoding='ascii'):
+        return b.decode(encoding)
+
+PY33 = sys.version_info[:2] == (3, 3)
+
+@contextmanager
+def capture_stderr():
+    stream = six.StringIO()
+    original = sys.stderr
+    try:
+        sys.stderr = stream
+        yield stream
+    finally:
+        sys.stderr = original
+        stream.seek(0)
index f57157a66629614c4cff52276d72c61adddb1392..c357866aa189ee5fcd84e36d6acf5a1609ab798e 100644 (file)
@@ -228,7 +228,7 @@ def getnameinfo(sockaddr, flags):
     except (ValueError, TypeError):
         if not isinstance(sockaddr, tuple):
             del sockaddr  # to pass a stdlib test that is
-                          # hyper-careful about reference counts
+            # hyper-careful about reference counts
             raise TypeError('getnameinfo() argument 1 must be a tuple')
         else:
             # must be ipv6 sockaddr, pretending we don't know how to resolve it
index f7abaa86599496b0df0799d6b0f05fdfd6b149c2..6f3b9bc9ef8eecda59e8c25e569979b46b93198b 100644 (file)
@@ -5,7 +5,7 @@ try:
     getcurrent = greenlet.greenlet.getcurrent
     GreenletExit = greenlet.greenlet.GreenletExit
     preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__)
-            >= distutils.version.LooseVersion('0.3.2'))
+                         >= distutils.version.LooseVersion('0.3.2'))
     greenlet = greenlet.greenlet
 except ImportError as e:
     raise
index aa00ae1178d8df242eba95cb28a443f8ecc5ecab..21b0e8032f6e77289a54275bc9ea7d5660b153e9 100644 (file)
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from __future__ import absolute_import
+
+import functools
 import operator
 import sys
 import types
 
 __author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.5.2"
+__version__ = "1.8.0"
 
 
 # Useful for very coarse version differentiation.
@@ -105,14 +108,6 @@ class MovedModule(_LazyDescr):
         return _import_module(self.mod)
 
     def __getattr__(self, attr):
-        # Hack around the Django autoreloader. The reloader tries to get
-        # __file__ or __name__ of every module in sys.modules. This doesn't work
-        # well if this MovedModule is for an module that is unavailable on this
-        # machine (like winreg on Unix systems). Thus, we pretend __file__ and
-        # __name__ don't exist if the module hasn't been loaded yet. See issues
-        # 51 and #53.
-        if attr in ("__file__", "__name__") and self.mod not in sys.modules:
-            raise AttributeError
         _module = self._resolve()
         value = getattr(_module, attr)
         setattr(self, attr, value)
@@ -159,8 +154,72 @@ class MovedAttribute(_LazyDescr):
         return getattr(module, self.attr)
 
 
+class _SixMetaPathImporter(object):
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python3
+    """
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true, if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
 class _MovedItems(_LazyModule):
     """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
 
 
 _moved_attributes = [
@@ -168,11 +227,15 @@ _moved_attributes = [
     MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
     MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
     MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
     MovedAttribute("map", "itertools", "builtins", "imap", "map"),
     MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
     MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
     MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
     MovedAttribute("UserString", "UserString", "collections"),
     MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
@@ -182,12 +245,14 @@ _moved_attributes = [
     MovedModule("configparser", "ConfigParser"),
     MovedModule("copyreg", "copy_reg"),
     MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
     MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
     MovedModule("http_cookies", "Cookie", "http.cookies"),
     MovedModule("html_entities", "htmlentitydefs", "html.entities"),
     MovedModule("html_parser", "HTMLParser", "html.parser"),
     MovedModule("http_client", "httplib", "http.client"),
     MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
     MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
     MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
     MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
@@ -221,18 +286,19 @@ _moved_attributes = [
     MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
     MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
     MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
-    MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
     MovedModule("winreg", "_winreg"),
 ]
 for attr in _moved_attributes:
     setattr(_MovedItems, attr.name, attr)
     if isinstance(attr, MovedModule):
-        sys.modules[__name__ + ".moves." + attr.name] = attr
+        _importer._add_module(attr, "moves." + attr.name)
 del attr
 
 _MovedItems._moved_attributes = _moved_attributes
 
-moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
 
 
 class Module_six_moves_urllib_parse(_LazyModule):
@@ -255,6 +321,14 @@ _urllib_parse_moved_attributes = [
     MovedAttribute("unquote", "urllib", "urllib.parse"),
     MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
     MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
 ]
 for attr in _urllib_parse_moved_attributes:
     setattr(Module_six_moves_urllib_parse, attr.name, attr)
@@ -262,7 +336,8 @@ del attr
 
 Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
 
-sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
 
 
 class Module_six_moves_urllib_error(_LazyModule):
@@ -280,7 +355,8 @@ del attr
 
 Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
 
-sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
 
 
 class Module_six_moves_urllib_request(_LazyModule):
@@ -328,7 +404,8 @@ del attr
 
 Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
 
-sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
 
 
 class Module_six_moves_urllib_response(_LazyModule):
@@ -347,7 +424,8 @@ del attr
 
 Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
 
-sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
 
 
 class Module_six_moves_urllib_robotparser(_LazyModule):
@@ -363,22 +441,24 @@ del attr
 
 Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
 
-sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
 
 
 class Module_six_moves_urllib(types.ModuleType):
     """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
-    parse = sys.modules[__name__ + ".moves.urllib_parse"]
-    error = sys.modules[__name__ + ".moves.urllib_error"]
-    request = sys.modules[__name__ + ".moves.urllib_request"]
-    response = sys.modules[__name__ + ".moves.urllib_response"]
-    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
 
     def __dir__(self):
         return ['parse', 'error', 'request', 'response', 'robotparser']
 
-
-sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
 
 
 def add_move(move):
@@ -405,11 +485,6 @@ if PY3:
     _func_code = "__code__"
     _func_defaults = "__defaults__"
     _func_globals = "__globals__"
-
-    _iterkeys = "keys"
-    _itervalues = "values"
-    _iteritems = "items"
-    _iterlists = "lists"
 else:
     _meth_func = "im_func"
     _meth_self = "im_self"
@@ -419,11 +494,6 @@ else:
     _func_defaults = "func_defaults"
     _func_globals = "func_globals"
 
-    _iterkeys = "iterkeys"
-    _itervalues = "itervalues"
-    _iteritems = "iteritems"
-    _iterlists = "iterlists"
-
 
 try:
     advance_iterator = next
@@ -472,30 +542,42 @@ get_function_defaults = operator.attrgetter(_func_defaults)
 get_function_globals = operator.attrgetter(_func_globals)
 
 
-def iterkeys(d, **kw):
-    """Return an iterator over the keys of a dictionary."""
-    return iter(getattr(d, _iterkeys)(**kw))
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
 
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
 
-def itervalues(d, **kw):
-    """Return an iterator over the values of a dictionary."""
-    return iter(getattr(d, _itervalues)(**kw))
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+else:
+    def iterkeys(d, **kw):
+        return iter(d.iterkeys(**kw))
 
+    def itervalues(d, **kw):
+        return iter(d.itervalues(**kw))
 
-def iteritems(d, **kw):
-    """Return an iterator over the (key, value) pairs of a dictionary."""
-    return iter(getattr(d, _iteritems)(**kw))
+    def iteritems(d, **kw):
+        return iter(d.iteritems(**kw))
 
+    def iterlists(d, **kw):
+        return iter(d.iterlists(**kw))
 
-def iterlists(d, **kw):
-    """Return an iterator over the (key, [values]) pairs of a dictionary."""
-    return iter(getattr(d, _iterlists)(**kw))
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
 
 
 if PY3:
     def b(s):
         return s.encode("latin-1")
-
     def u(s):
         return s
     unichr = chr
@@ -515,18 +597,14 @@ else:
     def b(s):
         return s
     # Workaround for standalone backslash
-
     def u(s):
         return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
     unichr = unichr
     int2byte = chr
-
     def byte2int(bs):
         return ord(bs[0])
-
     def indexbytes(buf, i):
         return ord(buf[i])
-
     def iterbytes(buf):
         return (ord(byte) for byte in buf)
     import StringIO
@@ -538,7 +616,10 @@ _add_doc(u, """Text literal""")
 if PY3:
     exec_ = getattr(moves.builtins, "exec")
 
+
     def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
         if value.__traceback__ is not tb:
             raise value.with_traceback(tb)
         raise value
@@ -556,6 +637,7 @@ else:
             _locs_ = _globs_
         exec("""exec _code_ in _globs_, _locs_""")
 
+
     exec_("""def reraise(tp, value, tb=None):
     raise tp, value, tb
 """)
@@ -568,7 +650,6 @@ if print_ is None:
         fp = kwargs.pop("file", sys.stdout)
         if fp is None:
             return
-
         def write(data):
             if not isinstance(data, basestring):
                 data = str(data)
@@ -619,23 +700,63 @@ if print_ is None:
 
 _add_doc(reraise, """Reraise an exception.""")
 
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
 
 def with_metaclass(meta, *bases):
     """Create a base class with a metaclass."""
-    return meta("NewBase", bases, {})
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
 
 
 def add_metaclass(metaclass):
     """Class decorator for creating a class with a metaclass."""
     def wrapper(cls):
         orig_vars = cls.__dict__.copy()
-        orig_vars.pop('__dict__', None)
-        orig_vars.pop('__weakref__', None)
         slots = orig_vars.get('__slots__')
         if slots is not None:
             if isinstance(slots, str):
                 slots = [slots]
             for slots_var in slots:
                 orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
         return metaclass(cls.__name__, cls.__bases__, orig_vars)
     return wrapper
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+            importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
index 5c9930aa6748d2ac7de60417dd4170e32c070ffc..f45c6d1dc5be4fbc5363472b0309ae0eaf174530 100644 (file)
@@ -100,7 +100,7 @@ class Timeout(BaseException):
 
     def __str__(self):
         """
-        >>> raise Timeout
+        >>> raise Timeout  # doctest: +IGNORE_EXCEPTION_DETAIL
         Traceback (most recent call last):
             ...
         Timeout
index 552f312f61fc95343da2276f6ee146392939ea54..e7f0db165c2905f32189f3c222840f56dab95c65 100644 (file)
@@ -257,11 +257,12 @@ def setup():
         _setup_already = True
 
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    sock.bind(('', 0))
+    sock.bind(('127.0.0.1', 0))
     sock.listen(1)
     csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     csock.connect(sock.getsockname())
     _wsock, _addr = sock.accept()
+    sock.close()
     _rsock = greenio.GreenSocket(csock)
 
     _reqq = Queue(maxsize=-1)
diff --git a/eventlet/eventlet/twistedutil/__init__.py b/eventlet/eventlet/twistedutil/__init__.py
deleted file mode 100644 (file)
index a5b8613..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-from eventlet.hubs import get_hub
-from eventlet import spawn, getcurrent
-
-def block_on(deferred):
-    cur = [getcurrent()]
-    synchronous = []
-    def cb(value):
-        if cur:
-            if getcurrent() is cur[0]:
-                synchronous.append((value, None))
-            else:
-                cur[0].switch(value)
-        return value
-    def eb(fail):
-        if cur:
-            if getcurrent() is cur[0]:
-                synchronous.append((None, fail))
-            else:
-                fail.throwExceptionIntoGenerator(cur[0])
-    deferred.addCallbacks(cb, eb)
-    if synchronous:
-        result, fail = synchronous[0]
-        if fail is not None:
-            fail.raiseException()
-        return result
-    try:
-        return get_hub().switch()
-    finally:
-        del cur[0]
-
-def _putResultInDeferred(deferred, f, args, kwargs):
-    try:
-        result = f(*args, **kwargs)
-    except:
-        from twisted.python import failure
-        f = failure.Failure()
-        deferred.errback(f)
-    else:
-        deferred.callback(result)
-
-def deferToGreenThread(func, *args, **kwargs):
-    from twisted.internet import defer
-    d = defer.Deferred()
-    spawn(_putResultInDeferred, d, func, args, kwargs)
-    return d
-
-def callInGreenThread(func, *args, **kwargs):
-    return spawn(func, *args, **kwargs)
-
-
-if __name__=='__main__':
-    import sys
-    try:
-        num = int(sys.argv[1])
-    except:
-        sys.exit('Supply number of test as an argument, 0, 1, 2 or 3')
-    from twisted.internet import reactor
-    def test():
-        print(block_on(reactor.resolver.getHostByName('www.google.com')))
-        print(block_on(reactor.resolver.getHostByName('###')))
-    if num==0:
-        test()
-    elif num==1:
-        spawn(test)
-        from eventlet.api import sleep
-        print('sleeping..')
-        sleep(5)
-        print('done sleeping..')
-    elif num==2:
-        from eventlet.twistedutil import join_reactor
-        spawn(test)
-        reactor.run()
-    elif num==3:
-        from eventlet.twistedutil import join_reactor
-        print("fails because it's impossible to use block_on from the mainloop")
-        reactor.callLater(0, test)
-        reactor.run()
-
diff --git a/eventlet/eventlet/twistedutil/join_reactor.py b/eventlet/eventlet/twistedutil/join_reactor.py
deleted file mode 100644 (file)
index 5964cbf..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Integrate eventlet with twisted's reactor mainloop.
-
-You generally don't have to use it unless you need to call reactor.run()
-yourself.
-"""
-from eventlet.hubs.twistedr import BaseTwistedHub
-from eventlet.support import greenlets as greenlet
-from eventlet.hubs import _threadlocal, use_hub
-
-use_hub(BaseTwistedHub)
-assert not hasattr(_threadlocal, 'hub')
-hub = _threadlocal.hub = _threadlocal.Hub(greenlet.getcurrent())
diff --git a/eventlet/eventlet/twistedutil/protocol.py b/eventlet/eventlet/twistedutil/protocol.py
deleted file mode 100644 (file)
index 60d43ad..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-"""Basic twisted protocols converted to synchronous mode"""
-import sys
-from twisted.internet.protocol import Protocol as twistedProtocol
-from twisted.internet.error import ConnectionDone
-from twisted.internet.protocol import Factory, ClientFactory
-from twisted.internet import main
-from twisted.python import failure
-
-from eventlet import greenthread
-from eventlet import getcurrent
-from eventlet.coros import Queue
-from eventlet.event import Event as BaseEvent
-
-
-class ValueQueue(Queue):
-    """Queue that keeps the last item forever in the queue if it's an exception.
-    Useful if you send an exception over queue only once, and once sent it must be always
-    available.
-    """
-
-    def send(self, value=None, exc=None):
-        if exc is not None or not self.has_error():
-            Queue.send(self, value, exc)
-
-    def wait(self):
-        """The difference from Queue.wait: if there is an only item in the
-        Queue and it is an exception, raise it, but keep it in the Queue, so
-        that future calls to wait() will raise it again.
-        """
-        if self.has_error() and len(self.items)==1:
-            # the last item, which is an exception, raise without emptying the Queue
-            getcurrent().throw(*self.items[0][1])
-        else:
-            return Queue.wait(self)
-
-    def has_error(self):
-        return self.items and self.items[-1][1] is not None
-
-
-class Event(BaseEvent):
-
-    def send(self, value, exc=None):
-        if self.ready():
-            self.reset()
-        return BaseEvent.send(self, value, exc)
-
-    def send_exception(self, *throw_args):
-        if self.ready():
-            self.reset()
-        return BaseEvent.send_exception(self, *throw_args)
-
-class Producer2Event(object):
-
-    # implements IPullProducer
-
-    def __init__(self, event):
-        self.event = event
-
-    def resumeProducing(self):
-        self.event.send(1)
-
-    def stopProducing(self):
-        del self.event
-
-
-class GreenTransportBase(object):
-
-    transportBufferSize = None
-
-    def __init__(self, transportBufferSize=None):
-        if transportBufferSize is not None:
-            self.transportBufferSize = transportBufferSize
-        self._queue = ValueQueue()
-        self._write_event = Event()
-        self._disconnected_event = Event()
-
-    def build_protocol(self):
-        return self.protocol_class(self)
-
-    def _got_transport(self, transport):
-        self._queue.send(transport)
-
-    def _got_data(self, data):
-        self._queue.send(data)
-
-    def _connectionLost(self, reason):
-        self._disconnected_event.send(reason.value)
-        self._queue.send_exception(reason.value)
-        self._write_event.send_exception(reason.value)
-
-    def _wait(self):
-        if self.disconnecting or self._disconnected_event.ready():
-            if self._queue:
-                return self._queue.wait()
-            else:
-                raise self._disconnected_event.wait()
-        self.resumeProducing()
-        try:
-            return self._queue.wait()
-        finally:
-            self.pauseProducing()
-
-    def write(self, data, wait=True):
-        if self._disconnected_event.ready():
-            raise self._disconnected_event.wait()
-        if wait:
-            self._write_event.reset()
-            self.transport.write(data)
-            self._write_event.wait()
-        else:
-            self.transport.write(data)
-
-    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE), wait=True):
-        self.transport.unregisterProducer()
-        self.transport.loseConnection(connDone)
-        if wait:
-            self._disconnected_event.wait()
-
-    def __getattr__(self, item):
-        if item=='transport':
-            raise AttributeError(item)
-        if hasattr(self, 'transport'):
-            try:
-                return getattr(self.transport, item)
-            except AttributeError:
-                me = type(self).__name__
-                trans = type(self.transport).__name__
-                raise AttributeError("Neither %r nor %r has attribute %r" % (me, trans, item))
-        else:
-            raise AttributeError(item)
-
-    def resumeProducing(self):
-        self.paused -= 1
-        if self.paused==0:
-            self.transport.resumeProducing()
-
-    def pauseProducing(self):
-        self.paused += 1
-        if self.paused==1:
-            self.transport.pauseProducing()
-
-    def _init_transport_producer(self):
-        self.transport.pauseProducing()
-        self.paused = 1
-
-    def _init_transport(self):
-        transport = self._queue.wait()
-        self.transport = transport
-        if self.transportBufferSize is not None:
-            transport.bufferSize = self.transportBufferSize
-        self._init_transport_producer()
-        transport.registerProducer(Producer2Event(self._write_event), False)
-
-
-class Protocol(twistedProtocol):
-
-    def __init__(self, recepient):
-        self._recepient = recepient
-
-    def connectionMade(self):
-        self._recepient._got_transport(self.transport)
-
-    def dataReceived(self, data):
-        self._recepient._got_data(data)
-
-    def connectionLost(self, reason):
-        self._recepient._connectionLost(reason)
-
-
-class UnbufferedTransport(GreenTransportBase):
-    """A very simple implementation of a green transport without an additional buffer"""
-
-    protocol_class = Protocol
-
-    def recv(self):
-        """Receive a single chunk of undefined size.
-
-        Return '' if connection was closed cleanly, raise the exception if it was closed
-        in a non clean fashion. After that all successive calls return ''.
-        """
-        if self._disconnected_event.ready():
-            return ''
-        try:
-            return self._wait()
-        except ConnectionDone:
-            return ''
-
-    def read(self):
-        """Read the data from the socket until the connection is closed cleanly.
-
-        If connection was closed in a non-clean fashion, the appropriate exception
-        is raised. In that case already received data is lost.
-        Next time read() is called it returns ''.
-        """
-        result = ''
-        while True:
-            recvd = self.recv()
-            if not recvd:
-                break
-            result += recvd
-        return result
-
-    # iterator protocol:
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        result = self.recv()
-        if not result:
-            raise StopIteration
-        return result
-
-
-class GreenTransport(GreenTransportBase):
-
-    protocol_class = Protocol
-    _buffer = ''
-    _error = None
-
-    def read(self, size=-1):
-        """Read size bytes or until EOF"""
-        if not self._disconnected_event.ready():
-            try:
-                while len(self._buffer) < size or size < 0:
-                    self._buffer += self._wait()
-            except ConnectionDone:
-                pass
-            except:
-                if not self._disconnected_event.has_exception():
-                    raise
-        if size>=0:
-            result, self._buffer = self._buffer[:size], self._buffer[size:]
-        else:
-            result, self._buffer = self._buffer, ''
-        if not result and self._disconnected_event.has_exception():
-            try:
-                self._disconnected_event.wait()
-            except ConnectionDone:
-                pass
-        return result
-
-    def recv(self, buflen=None):
-        """Receive a single chunk of undefined size but no bigger than buflen"""
-        if not self._disconnected_event.ready():
-            self.resumeProducing()
-            try:
-                try:
-                    recvd = self._wait()
-                    #print 'received %r' % recvd
-                    self._buffer += recvd
-                except ConnectionDone:
-                    pass
-                except:
-                    if not self._disconnected_event.has_exception():
-                        raise
-            finally:
-                self.pauseProducing()
-        if buflen is None:
-            result, self._buffer = self._buffer, ''
-        else:
-            result, self._buffer = self._buffer[:buflen], self._buffer[buflen:]
-        if not result and self._disconnected_event.has_exception():
-            try:
-                self._disconnected_event.wait()
-            except ConnectionDone:
-                pass
-        return result
-
-    # iterator protocol:
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        res = self.recv()
-        if not res:
-            raise StopIteration
-        return res
-
-
-class GreenInstanceFactory(ClientFactory):
-
-    def __init__(self, instance, event):
-        self.instance = instance
-        self.event = event
-
-    def buildProtocol(self, addr):
-        return self.instance
-
-    def clientConnectionFailed(self, connector, reason):
-        self.event.send_exception(reason.type, reason.value, reason.tb)
-
-
-class GreenClientCreator(object):
-    """Connect to a remote host and return a connected green transport instance.
-    """
-
-    gtransport_class = GreenTransport
-
-    def __init__(self, reactor=None, gtransport_class=None, *args, **kwargs):
-        if reactor is None:
-            from twisted.internet import reactor
-        self.reactor = reactor
-        if gtransport_class is not None:
-            self.gtransport_class = gtransport_class
-        self.args = args
-        self.kwargs = kwargs
-
-    def _make_transport_and_factory(self):
-        gtransport = self.gtransport_class(*self.args, **self.kwargs)
-        protocol = gtransport.build_protocol()
-        factory = GreenInstanceFactory(protocol, gtransport._queue)
-        return gtransport, factory
-
-    def connectTCP(self, host, port, *args, **kwargs):
-        gtransport, factory = self._make_transport_and_factory()
-        self.reactor.connectTCP(host, port, factory, *args, **kwargs)
-        gtransport._init_transport()
-        return gtransport
-
-    def connectSSL(self, host, port, *args, **kwargs):
-        gtransport, factory = self._make_transport_and_factory()
-        self.reactor.connectSSL(host, port, factory, *args, **kwargs)
-        gtransport._init_transport()
-        return gtransport
-
-    def connectTLS(self, host, port, *args, **kwargs):
-        gtransport, factory = self._make_transport_and_factory()
-        self.reactor.connectTLS(host, port, factory, *args, **kwargs)
-        gtransport._init_transport()
-        return gtransport
-
-    def connectUNIX(self, address, *args, **kwargs):
-        gtransport, factory = self._make_transport_and_factory()
-        self.reactor.connectUNIX(address, factory, *args, **kwargs)
-        gtransport._init_transport()
-        return gtransport
-
-    def connectSRV(self, service, domain, *args, **kwargs):
-        SRVConnector = kwargs.pop('ConnectorClass', None)
-        if SRVConnector is None:
-            from twisted.names.srvconnect import SRVConnector
-        gtransport, factory = self._make_transport_and_factory()
-        c = SRVConnector(self.reactor, service, domain, factory, *args, **kwargs)
-        c.connect()
-        gtransport._init_transport()
-        return gtransport
-
-
-class SimpleSpawnFactory(Factory):
-    """Factory that spawns a new greenlet for each incoming connection.
-
-    For an incoming connection a new greenlet is created using the provided
-    callback as a function and a connected green transport instance as an
-    argument.
-    """
-
-    gtransport_class = GreenTransport
-
-    def __init__(self, handler, gtransport_class=None, *args, **kwargs):
-        if callable(handler):
-            self.handler = handler
-        else:
-            self.handler = handler.send
-        if hasattr(handler, 'send_exception'):
-            self.exc_handler = handler.send_exception
-        if gtransport_class is not None:
-            self.gtransport_class = gtransport_class
-        self.args = args
-        self.kwargs = kwargs
-
-    def exc_handler(self, *args):
-        pass
-
-    def buildProtocol(self, addr):
-        gtransport = self.gtransport_class(*self.args, **self.kwargs)
-        protocol = gtransport.build_protocol()
-        protocol.factory = self
-        self._do_spawn(gtransport, protocol)
-        return protocol
-
-    def _do_spawn(self, gtransport, protocol):
-        greenthread.spawn(self._run_handler, gtransport, protocol)
-
-    def _run_handler(self, gtransport, protocol):
-        try:
-            gtransport._init_transport()
-        except Exception:
-            self.exc_handler(*sys.exc_info())
-        else:
-            self.handler(gtransport)
-
-
-class SpawnFactory(SimpleSpawnFactory):
-    """An extension to SimpleSpawnFactory that provides some control over
-    the greenlets it has spawned.
-    """
-
-    def __init__(self, handler, gtransport_class=None, *args, **kwargs):
-        self.greenlets = set()
-        SimpleSpawnFactory.__init__(self, handler, gtransport_class, *args, **kwargs)
-
-    def _do_spawn(self, gtransport, protocol):
-        g = greenthread.spawn(self._run_handler, gtransport, protocol)
-        self.greenlets.add(g)
-        g.link(lambda *_: self.greenlets.remove(g))
-
-    def waitall(self):
-        results = []
-        for g in self.greenlets:
-            results.append(g.wait())
-        return results
-
diff --git a/eventlet/eventlet/twistedutil/protocols/__init__.py b/eventlet/eventlet/twistedutil/protocols/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/eventlet/eventlet/twistedutil/protocols/basic.py b/eventlet/eventlet/twistedutil/protocols/basic.py
deleted file mode 100644 (file)
index 67a5967..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-from twisted.protocols import basic
-from twisted.internet.error import ConnectionDone
-from eventlet.twistedutil.protocol import GreenTransportBase
-
-
-class LineOnlyReceiver(basic.LineOnlyReceiver):
-
-    def __init__(self, recepient):
-        self._recepient = recepient
-
-    def connectionMade(self):
-        self._recepient._got_transport(self.transport)
-
-    def connectionLost(self, reason):
-        self._recepient._connectionLost(reason)
-
-    def lineReceived(self, line):
-        self._recepient._got_data(line)
-
-
-class LineOnlyReceiverTransport(GreenTransportBase):
-
-    protocol_class = LineOnlyReceiver
-
-    def readline(self):
-        return self._wait()
-
-    def sendline(self, line):
-        self.protocol.sendLine(line)
-
-    # iterator protocol:
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        try:
-            return self.readline()
-        except ConnectionDone:
-            raise StopIteration
-
diff --git a/eventlet/eventlet/util.py b/eventlet/eventlet/util.py
deleted file mode 100644 (file)
index ea0a464..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-import socket
-import warnings
-
-
-__original_socket__ = socket.socket
-
-
-def tcp_socket():
-    warnings.warn(
-        "eventlet.util.tcp_socket is deprecated. "
-        "Please use the standard socket technique for this instead: "
-        "sock = socket.socket()",
-        DeprecationWarning, stacklevel=2)
-    s = __original_socket__(socket.AF_INET, socket.SOCK_STREAM)
-    return s
-
-
-# if ssl is available, use eventlet.green.ssl for our ssl implementation
-from eventlet.green import ssl
-
-
-def wrap_ssl(sock, certificate=None, private_key=None, server_side=False):
-    warnings.warn(
-        "eventlet.util.wrap_ssl is deprecated. "
-        "Please use the eventlet.green.ssl.wrap_socket()",
-        DeprecationWarning, stacklevel=2)
-    return ssl.wrap_socket(
-        sock,
-        keyfile=private_key,
-        certfile=certificate,
-        server_side=server_side,
-    )
-
-
-def wrap_socket_with_coroutine_socket(use_thread_pool=None):
-    warnings.warn(
-        "eventlet.util.wrap_socket_with_coroutine_socket() is now "
-        "eventlet.patcher.monkey_patch(all=False, socket=True)",
-        DeprecationWarning, stacklevel=2)
-    from eventlet import patcher
-    patcher.monkey_patch(all=False, socket=True)
-
-
-def wrap_pipes_with_coroutine_pipes():
-    warnings.warn(
-        "eventlet.util.wrap_pipes_with_coroutine_pipes() is now "
-        "eventlet.patcher.monkey_patch(all=False, os=True)",
-        DeprecationWarning, stacklevel=2)
-    from eventlet import patcher
-    patcher.monkey_patch(all=False, os=True)
-
-
-def wrap_select_with_coroutine_select():
-    warnings.warn(
-        "eventlet.util.wrap_select_with_coroutine_select() is now "
-        "eventlet.patcher.monkey_patch(all=False, select=True)",
-        DeprecationWarning, stacklevel=2)
-    from eventlet import patcher
-    patcher.monkey_patch(all=False, select=True)
-
-
-def wrap_threading_local_with_coro_local():
-    """
-    monkey patch ``threading.local`` with something that is greenlet aware.
-    Since greenlets cannot cross threads, so this should be semantically
-    identical to ``threadlocal.local``
-    """
-    warnings.warn(
-        "eventlet.util.wrap_threading_local_with_coro_local() is now "
-        "eventlet.patcher.monkey_patch(all=False, thread=True) -- though"
-        "note that more than just _local is patched now.",
-        DeprecationWarning, stacklevel=2)
-
-    from eventlet import patcher
-    patcher.monkey_patch(all=False, thread=True)
-
-
-def socket_bind_and_listen(descriptor, addr=('', 0), backlog=50):
-    warnings.warn(
-        "eventlet.util.socket_bind_and_listen is deprecated."
-        "Please use the standard socket methodology for this instead:"
-        "sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)"
-        "sock.bind(addr)"
-        "sock.listen(backlog)",
-        DeprecationWarning, stacklevel=2)
-    set_reuse_addr(descriptor)
-    descriptor.bind(addr)
-    descriptor.listen(backlog)
-    return descriptor
-
-
-def set_reuse_addr(descriptor):
-    warnings.warn(
-        "eventlet.util.set_reuse_addr is deprecated."
-        "Please use the standard socket methodology for this instead:"
-        "sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)",
-        DeprecationWarning, stacklevel=2)
-    try:
-        descriptor.setsockopt(
-            socket.SOL_SOCKET,
-            socket.SO_REUSEADDR,
-            descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1)
-    except socket.error:
-        pass
index 64514c20865b46400ebde8de923b4446796719a9..3c93e902e2f9ab724661baf8504216d8e7837d63 100644 (file)
@@ -37,15 +37,17 @@ for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'):
 ACCEPTABLE_CLIENT_ERRORS = set((errno.ECONNRESET, errno.EPIPE))
 
 __all__ = ["WebSocketWSGI", "WebSocket"]
-PROTOCOL_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
-VALID_CLOSE_STATUS = (range(1000, 1004)
-                      + range(1007, 1012)
-                      # 3000-3999: reserved for use by libraries, frameworks,
-                      # and applications
-                      + range(3000, 4000)
-                      # 4000-4999: reserved for private use and thus can't
-                      # be registered
-                      + range(4000, 5000))
+PROTOCOL_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+VALID_CLOSE_STATUS = set(
+    list(range(1000, 1004)) +
+    list(range(1007, 1012)) +
+    # 3000-3999: reserved for use by libraries, frameworks,
+    # and applications
+    list(range(3000, 4000)) +
+    # 4000-4999: reserved for private use and thus can't
+    # be registered
+    list(range(4000, 5000))
+)
 
 
 class BadRequest(Exception):
@@ -115,7 +117,7 @@ class WebSocketWSGI(object):
                 raise BadRequest()
         except BadRequest as e:
             status = e.status
-            body = e.body or ''
+            body = e.body or b''
             headers = e.headers or []
             start_response(status,
                            [('Connection', 'close'), ] + headers)
@@ -166,25 +168,24 @@ class WebSocketWSGI(object):
         if qs is not None:
             location += '?' + qs
         if self.protocol_version == 75:
-            handshake_reply = ("HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
-                               "Upgrade: WebSocket\r\n"
-                               "Connection: Upgrade\r\n"
-                               "WebSocket-Origin: %s\r\n"
-                               "WebSocket-Location: %s\r\n\r\n" % (
-                                   environ.get('HTTP_ORIGIN'),
-                                   location))
+            handshake_reply = (
+                b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
+                b"Upgrade: WebSocket\r\n"
+                b"Connection: Upgrade\r\n"
+                b"WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n"
+                b"WebSocket-Location: " + six.b(location) + b"\r\n\r\n"
+            )
         elif self.protocol_version == 76:
-            handshake_reply = ("HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
-                               "Upgrade: WebSocket\r\n"
-                               "Connection: Upgrade\r\n"
-                               "Sec-WebSocket-Origin: %s\r\n"
-                               "Sec-WebSocket-Protocol: %s\r\n"
-                               "Sec-WebSocket-Location: %s\r\n"
-                               "\r\n%s" % (
-                                   environ.get('HTTP_ORIGIN'),
-                                   environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default'),
-                                   location,
-                                   response))
+            handshake_reply = (
+                b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
+                b"Upgrade: WebSocket\r\n"
+                b"Connection: Upgrade\r\n"
+                b"Sec-WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n"
+                b"Sec-WebSocket-Protocol: " +
+                six.b(environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default')) + b"\r\n"
+                b"Sec-WebSocket-Location: " + six.b(location) + b"\r\n"
+                b"\r\n" + response
+            )
         else:  # pragma NO COVER
             raise ValueError("Unknown WebSocket protocol version.")
         sock.sendall(handshake_reply)
@@ -219,15 +220,14 @@ class WebSocketWSGI(object):
         #    extensions = [i.strip() for i in extensions.split(',')]
 
         key = environ['HTTP_SEC_WEBSOCKET_KEY']
-        response = base64.b64encode(sha1(key + PROTOCOL_GUID).digest())
-        handshake_reply = ["HTTP/1.1 101 Switching Protocols",
-                           "Upgrade: websocket",
-                           "Connection: Upgrade",
-                           "Sec-WebSocket-Accept: %s" % (response, )]
+        response = base64.b64encode(sha1(six.b(key) + PROTOCOL_GUID).digest())
+        handshake_reply = [b"HTTP/1.1 101 Switching Protocols",
+                           b"Upgrade: websocket",
+                           b"Connection: Upgrade",
+                           b"Sec-WebSocket-Accept: " + response]
         if negotiated_protocol:
-            handshake_reply.append("Sec-WebSocket-Protocol: %s"
-                                   % (negotiated_protocol, ))
-        sock.sendall('\r\n'.join(handshake_reply) + '\r\n\r\n')
+            handshake_reply.append(b"Sec-WebSocket-Protocol: " + six.b(negotiated_protocol))
+        sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n')
         return RFC6455WebSocket(sock, environ, self.protocol_version,
                                 protocol=negotiated_protocol)
 
@@ -243,7 +243,7 @@ class WebSocketWSGI(object):
                 out += char
             elif char == " ":
                 spaces += 1
-        return int(out) / spaces
+        return int(out) // spaces
 
 
 class WebSocket(object):
@@ -256,7 +256,8 @@ class WebSocket(object):
     properties:
 
     path
-        The path value of the request.  This is the same as the WSGI PATH_INFO variable, but more convenient.
+        The path value of the request.  This is the same as the WSGI PATH_INFO variable,
+        but more convenient.
     protocol
         The value of the Websocket-Protocol header.
     origin
@@ -280,7 +281,7 @@ class WebSocket(object):
         self.environ = environ
         self.version = version
         self.websocket_closed = False
-        self._buf = ""
+        self._buf = b""
         self._msgs = collections.deque()
         self._sendlock = semaphore.Semaphore()
 
@@ -293,8 +294,8 @@ class WebSocket(object):
         if isinstance(message, six.text_type):
             message = message.encode('utf-8')
         elif not isinstance(message, six.binary_type):
-            message = b'%s' % (message,)
-        packed = b"\x00%s\xFF" % message
+            message = six.b(str(message))
+        packed = b"\x00" + message + b"\xFF"
         return packed
 
     def _parse_messages(self):
@@ -308,17 +309,17 @@ class WebSocket(object):
         end_idx = 0
         buf = self._buf
         while buf:
-            frame_type = ord(buf[0])
+            frame_type = six.indexbytes(buf, 0)
             if frame_type == 0:
                 # Normal message.
-                end_idx = buf.find("\xFF")
+                end_idx = buf.find(b"\xFF")
                 if end_idx == -1:  # pragma NO COVER
                     break
                 msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
                 buf = buf[end_idx + 1:]
             elif frame_type == 255:
                 # Closing handshake.
-                assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf
+                assert six.indexbytes(buf, 1) == 0, "Unexpected closing handshake: %r" % buf
                 self.websocket_closed = True
                 break
             else:
@@ -354,7 +355,7 @@ class WebSocket(object):
                 return None
             # no parsed messages, must mean buf needs more data
             delta = self.socket.recv(8096)
-            if delta == '':
+            if delta == b'':
                 return None
             self._buf += delta
             msgs = self._parse_messages()
@@ -425,7 +426,7 @@ class RFC6455WebSocket(WebSocket):
             return self.decoder.decode(data, final)
 
     def _get_bytes(self, numbytes):
-        data = ''
+        data = b''
         while len(data) < numbytes:
             d = self.socket.recv(numbytes - len(data))
             if not d:
@@ -447,14 +448,14 @@ class RFC6455WebSocket(WebSocket):
             self.data.append(data)
 
         def getvalue(self):
-            return ''.join(self.data)
+            return ('' if self.decoder else b'').join(self.data)
 
     @staticmethod
     def _apply_mask(data, mask, length=None, offset=0):
         if length is None:
             length = len(data)
         cnt = range(length)
-        return ''.join(chr(ord(data[i]) ^ mask[(offset + i) % 4]) for i in cnt)
+        return b''.join(six.int2byte(six.indexbytes(data, i) ^ mask[(offset + i) % 4]) for i in cnt)
 
     def _handle_control_frame(self, opcode, data):
         if opcode == 8:  # connection close
@@ -609,12 +610,13 @@ class RFC6455WebSocket(WebSocket):
             # NOTE: RFC6455 states:
             # A server MUST NOT mask any frames that it sends to the client
             rand = Random(time.time())
-            mask = map(rand.getrandbits, (8, ) * 4)
+            mask = [rand.getrandbits(8) for _ in six.moves.xrange(4)]
             message = RFC6455WebSocket._apply_mask(message, mask, length)
             maskdata = struct.pack('!BBBB', *mask)
         else:
-            maskdata = ''
-        return ''.join((header, lengthdata, maskdata, message))
+            maskdata = b''
+
+        return b''.join((header, lengthdata, maskdata, message))
 
     def wait(self):
         for i in self.iterator:
index 94a24056551aa644cbb5753465ff7640b8b80eca..e69d107d49e69d593125c5c83cc22542a3312264 100644 (file)
@@ -24,6 +24,7 @@ MINIMUM_CHUNK_SIZE = 4096
 # %(client_port)s is also available
 DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
                       ' %(status_code)s %(body_length)s %(wall_seconds).6f')
+is_accepting = True
 
 __all__ = ['server', 'format_date_time']
 
@@ -68,11 +69,13 @@ class Input(object):
     def __init__(self,
                  rfile,
                  content_length,
+                 sock,
                  wfile=None,
                  wfile_line=None,
                  chunked_input=False):
 
         self.rfile = rfile
+        self._sock = sock
         if content_length is not None:
             content_length = int(content_length)
         self.content_length = content_length
@@ -87,8 +90,9 @@ class Input(object):
         # (optional) headers to send with a "100 Continue" response. Set by
         # calling set_hundred_continue_respose_headers() on env['wsgi.input']
         self.hundred_continue_headers = None
+        self.is_hundred_continue_response_sent = False
 
-    def _send_hundred_continue_response(self):
+    def send_hundred_continue_response(self):
         towrite = []
 
         # 100 Continue status line
@@ -98,41 +102,46 @@ class Input(object):
         if self.hundred_continue_headers is not None:
             # 100 Continue headers
             for header in self.hundred_continue_headers:
-                towrite.append('%s: %s\r\n' % header)
+                towrite.append(six.b('%s: %s\r\n' % header))
 
         # Blank line
-        towrite.append('\r\n')
+        towrite.append(b'\r\n')
 
         self.wfile.writelines(towrite)
-        self.wfile = None
-        self.wfile_line = None
+
+        # Reinitialize chunk_length (expect more data)
+        self.chunk_length = -1
 
     def _do_read(self, reader, length=None):
-        if self.wfile is not None:
+        if self.wfile is not None and \
+                not self.is_hundred_continue_response_sent:
             # 100 Continue response
-            self._send_hundred_continue_response()
+            self.send_hundred_continue_response()
+            self.is_hundred_continue_response_sent = True
         if length is None and self.content_length is not None:
             length = self.content_length - self.position
         if length and length > self.content_length - self.position:
             length = self.content_length - self.position
         if not length:
-            return ''
+            return b''
         try:
             read = reader(length)
         except greenio.SSL.ZeroReturnError:
-            read = ''
+            read = b''
         self.position += len(read)
         return read
 
     def _chunked_read(self, rfile, length=None, use_readline=False):
-        if self.wfile is not None:
+        if self.wfile is not None and \
+                not self.is_hundred_continue_response_sent:
             # 100 Continue response
-            self._send_hundred_continue_response()
+            self.send_hundred_continue_response()
+            self.is_hundred_continue_response_sent = True
         try:
             if length == 0:
-                return ""
+                return b""
 
-            if length < 0:
+            if length and length < 0:
                 length = None
 
             if use_readline:
@@ -189,10 +198,10 @@ class Input(object):
         return self._do_read(self.rfile.readlines, hint)
 
     def __iter__(self):
-        return iter(self.read, '')
+        return iter(self.read, b'')
 
     def get_socket(self):
-        return self.rfile._sock
+        return self._sock
 
     def set_hundred_continue_response_headers(self, headers,
                                               capitalize_response_headers=True):
@@ -226,7 +235,7 @@ class FileObjectForHeaders(object):
         if size < 0:
             sz = MAX_HEADER_LINE
         rv = self.fp.readline(sz)
-        if size < 0 and len(rv) >= MAX_HEADER_LINE:
+        if len(rv) >= MAX_HEADER_LINE:
             raise HeaderLineTooLong()
         self.total_header_size += len(rv)
         if self.total_header_size > MAX_TOTAL_HEADER_SIZE:
@@ -267,8 +276,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
             self.raw_requestline = self.rfile.readline(self.server.url_length_limit)
             if len(self.raw_requestline) == self.server.url_length_limit:
                 self.wfile.write(
-                    "HTTP/1.0 414 Request URI Too Long\r\n"
-                    "Connection: close\r\nContent-length: 0\r\n\r\n")
+                    b"HTTP/1.0 414 Request URI Too Long\r\n"
+                    b"Connection: close\r\nContent-length: 0\r\n\r\n")
                 self.close_connection = 1
                 return
         except greenio.SSL.ZeroReturnError:
@@ -289,14 +298,14 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 return
         except HeaderLineTooLong:
             self.wfile.write(
-                "HTTP/1.0 400 Header Line Too Long\r\n"
-                "Connection: close\r\nContent-length: 0\r\n\r\n")
+                b"HTTP/1.0 400 Header Line Too Long\r\n"
+                b"Connection: close\r\nContent-length: 0\r\n\r\n")
             self.close_connection = 1
             return
         except HeadersTooLarge:
             self.wfile.write(
-                "HTTP/1.0 400 Headers Too Large\r\n"
-                "Connection: close\r\nContent-length: 0\r\n\r\n")
+                b"HTTP/1.0 400 Headers Too Large\r\n"
+                b"Connection: close\r\nContent-length: 0\r\n\r\n")
             self.close_connection = 1
             return
         finally:
@@ -308,8 +317,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 int(content_length)
             except ValueError:
                 self.wfile.write(
-                    "HTTP/1.0 400 Bad Request\r\n"
-                    "Connection: close\r\nContent-length: 0\r\n\r\n")
+                    b"HTTP/1.0 400 Bad Request\r\n"
+                    b"Connection: close\r\nContent-length: 0\r\n\r\n")
                 self.close_connection = 1
                 return
 
@@ -345,13 +354,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 status, response_headers = headers_set
                 headers_sent.append(1)
                 header_list = [header[0].lower() for header in response_headers]
-                towrite.append('%s %s\r\n' % (self.protocol_version, status))
+                towrite.append(six.b('%s %s\r\n' % (self.protocol_version, status)))
                 for header in response_headers:
-                    towrite.append('%s: %s\r\n' % header)
+                    towrite.append(six.b('%s: %s\r\n' % header))
 
                 # send Date header?
                 if 'date' not in header_list:
-                    towrite.append('Date: %s\r\n' % (format_date_time(time.time()),))
+                    towrite.append(six.b('Date: %s\r\n' % (format_date_time(time.time()),)))
 
                 client_conn = self.headers.get('Connection', '').lower()
                 send_keep_alive = False
@@ -369,21 +378,21 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 if 'content-length' not in header_list:
                     if self.request_version == 'HTTP/1.1':
                         use_chunked[0] = True
-                        towrite.append('Transfer-Encoding: chunked\r\n')
+                        towrite.append(b'Transfer-Encoding: chunked\r\n')
                     elif 'content-length' not in header_list:
                         # client is 1.0 and therefore must read to EOF
                         self.close_connection = 1
 
                 if self.close_connection:
-                    towrite.append('Connection: close\r\n')
+                    towrite.append(b'Connection: close\r\n')
                 elif send_keep_alive:
-                    towrite.append('Connection: keep-alive\r\n')
-                towrite.append('\r\n')
+                    towrite.append(b'Connection: keep-alive\r\n')
+                towrite.append(b'\r\n')
                 # end of header writing
 
             if use_chunked[0]:
                 # Write the chunked encoding
-                towrite.append("%x\r\n%s\r\n" % (len(data), data))
+                towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n")
             else:
                 towrite.append(data)
             try:
@@ -450,23 +459,21 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                     towrite.append(data)
                     towrite_size += len(data)
                     if towrite_size >= minimum_write_chunk_size:
-                        write(''.join(towrite))
+                        write(b''.join(towrite))
                         towrite = []
                         just_written_size = towrite_size
                         towrite_size = 0
                 if towrite:
                     just_written_size = towrite_size
-                    write(''.join(towrite))
+                    write(b''.join(towrite))
                 if not headers_sent or (use_chunked[0] and just_written_size):
-                    write('')
+                    write(b'')
             except Exception:
                 self.close_connection = 1
                 tb = traceback.format_exc()
                 self.server.log_message(tb)
                 if not headers_set:
-                    err_body = ""
-                    if(self.server.debug):
-                        err_body = tb
+                    err_body = six.b(tb) if self.server.debug else b''
                     start_response("500 Internal Server Error",
                                    [('Content-type', 'text/plain'),
                                     ('Content-length', len(err_body))])
@@ -476,7 +483,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 result.close()
             if (self.environ['eventlet.input'].chunked_input or
                     self.environ['eventlet.input'].position
-                    < self.environ['eventlet.input'].content_length):
+                    < (self.environ['eventlet.input'].content_length or 0)):
                 # Read and discard body if there was no pending 100-continue
                 if not self.environ['eventlet.input'].wfile:
                     # NOTE: MINIMUM_CHUNK_SIZE is used here for purpose different than chunking.
@@ -559,13 +566,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
 
         if env.get('HTTP_EXPECT') == '100-continue':
             wfile = self.wfile
-            wfile_line = 'HTTP/1.1 100 Continue\r\n'
+            wfile_line = b'HTTP/1.1 100 Continue\r\n'
         else:
             wfile = None
             wfile_line = None
         chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
         env['wsgi.input'] = env['eventlet.input'] = Input(
-            self.rfile, length, wfile=wfile, wfile_line=wfile_line,
+            self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
             chunked_input=chunked)
         env['eventlet.posthooks'] = []
 
@@ -581,6 +588,9 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
         greenio.shutdown_safe(self.connection)
         self.connection.close()
 
+    def handle_expect_100(self):
+        return True
+
 
 class Server(BaseHTTPServer.HTTPServer):
 
@@ -708,6 +718,19 @@ def server(sock, site,
     closed after server exits, but the underlying file descriptor will
     remain open, so if you have a dup() of *sock*, it will remain usable.
 
+    .. warning::
+
+        At the moment :func:`server` will always wait for active connections to finish before
+        exiting, even if there's an exception raised inside it
+        (*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit`
+        and those inheriting from `BaseException`).
+
+        While this may not be an issue normally, when it comes to long running HTTP connections
+        (like :mod:`eventlet.websocket`) it will become problematic and calling
+        :meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang,
+        even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long
+        as there are active connections.
+
     :param sock: Server socket, must be already bound to a port and listening.
     :param site: WSGI application function.
     :param log: File-like object that logs should be written to.
@@ -780,7 +803,7 @@ def server(sock, site,
 
         serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
             serv.pid, scheme, host, port))
-        while True:
+        while is_accepting:
             try:
                 client_socket = sock.accept()
                 client_socket[0].settimeout(serv.socket_timeout)
@@ -802,6 +825,9 @@ def server(sock, site,
                 serv.log.write("wsgi exiting\n")
                 break
     finally:
+        pool.waitall()
+        serv.log.write("(%s) wsgi exited, is_accepting=%s\n" % (
+            serv.pid, is_accepting))
         try:
             # NOTE: It's not clear whether we want this to leave the
             # socket open or close it.  Use cases like Spawning want
diff --git a/eventlet/examples/twisted/twisted_client.py b/eventlet/examples/twisted/twisted_client.py
deleted file mode 100644 (file)
index ec4418b..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Example for GreenTransport and GreenClientCreator.
-
-In this example reactor is started implicitly upon the first
-use of a blocking function.
-"""
-from twisted.internet import ssl
-from twisted.internet.error import ConnectionClosed
-from eventlet.twistedutil.protocol import GreenClientCreator
-from eventlet.twistedutil.protocols.basic import LineOnlyReceiverTransport
-from twisted.internet import reactor
-
-# read from TCP connection
-conn = GreenClientCreator(reactor).connectTCP('www.google.com', 80)
-conn.write('GET / HTTP/1.0\r\n\r\n')
-conn.loseWriteConnection()
-print(conn.read())
-
-# read from SSL connection line by line
-conn = GreenClientCreator(reactor, LineOnlyReceiverTransport).connectSSL('sf.net', 443, ssl.ClientContextFactory())
-conn.write('GET / HTTP/1.0\r\n\r\n')
-try:
-    for num, line in enumerate(conn):
-        print('%3s %r' % (num, line))
-except ConnectionClosed as ex:
-    print(ex)
-
diff --git a/eventlet/examples/twisted/twisted_http_proxy.py b/eventlet/examples/twisted/twisted_http_proxy.py
deleted file mode 100644 (file)
index 25f3462..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-"""Listen on port 8888 and pretend to be an HTTP proxy.
-It even works for some pages.
-
-Demonstrates how to
- * plug in eventlet into a twisted application (join_reactor)
- * call green functions from places where blocking calls
-   are not allowed (deferToGreenThread)
- * use eventlet.green package which provides [some of] the
-   standard library modules that don't block other greenlets.
-"""
-import re
-from twisted.internet.protocol import Factory
-from twisted.internet import reactor
-from twisted.protocols import basic
-
-from eventlet.twistedutil import deferToGreenThread
-from eventlet.twistedutil import join_reactor
-from eventlet.green import httplib
-
-class LineOnlyReceiver(basic.LineOnlyReceiver):
-
-    def connectionMade(self):
-        self.lines = []
-
-    def lineReceived(self, line):
-        if line:
-            self.lines.append(line)
-        elif self.lines:
-            self.requestReceived(self.lines)
-            self.lines = []
-
-    def requestReceived(self, lines):
-        request = re.match('^(\w+) http://(.*?)(/.*?) HTTP/1..$', lines[0])
-        #print request.groups()
-        method, host, path = request.groups()
-        headers = dict(x.split(': ', 1) for x in lines[1:])
-        def callback(result):
-            self.transport.write(str(result))
-            self.transport.loseConnection()
-        def errback(err):
-            err.printTraceback()
-            self.transport.loseConnection()
-        d = deferToGreenThread(http_request, method, host, path, headers=headers)
-        d.addCallbacks(callback, errback)
-
-def http_request(method, host, path, headers):
-    conn = httplib.HTTPConnection(host)
-    conn.request(method, path, headers=headers)
-    response = conn.getresponse()
-    body = response.read()
-    print(method, host, path, response.status, response.reason, len(body))
-    return format_response(response, body)
-
-def format_response(response, body):
-    result = "HTTP/1.1 %s %s" % (response.status, response.reason)
-    for k, v in response.getheaders():
-        result += '\r\n%s: %s' % (k, v)
-    if body:
-        result += '\r\n\r\n'
-        result += body
-        result += '\r\n'
-    return result
-
-class MyFactory(Factory):
-    protocol = LineOnlyReceiver
-
-print(__doc__)
-reactor.listenTCP(8888, MyFactory())
-reactor.run()
diff --git a/eventlet/examples/twisted/twisted_portforward.py b/eventlet/examples/twisted/twisted_portforward.py
deleted file mode 100644 (file)
index f62232d..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Port forwarder
-USAGE: twisted_portforward.py local_port remote_host remote_port"""
-import sys
-from twisted.internet import reactor
-from eventlet.twistedutil import join_reactor
-from eventlet.twistedutil.protocol import GreenClientCreator, SpawnFactory, UnbufferedTransport
-from eventlet import proc
-
-def forward(source, dest):
-    try:
-        while True:
-            x = source.recv()
-            if not x:
-                break
-            print('forwarding %s bytes' % len(x))
-            dest.write(x)
-    finally:
-        dest.loseConnection()
-
-def handler(local):
-    client = str(local.getHost())
-    print('accepted connection from %s' % client)
-    remote = GreenClientCreator(reactor, UnbufferedTransport).connectTCP(remote_host, remote_port)
-    a = proc.spawn(forward, remote, local)
-    b = proc.spawn(forward, local, remote)
-    proc.waitall([a, b], trap_errors=True)
-    print('closed connection to %s' % client)
-
-try:
-    local_port, remote_host, remote_port = sys.argv[1:]
-except ValueError:
-    sys.exit(__doc__)
-local_port = int(local_port)
-remote_port = int(remote_port)
-reactor.listenTCP(local_port, SpawnFactory(handler))
-reactor.run()
diff --git a/eventlet/examples/twisted/twisted_server.py b/eventlet/examples/twisted/twisted_server.py
deleted file mode 100644 (file)
index 3f0ca77..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Simple chat demo application.
-Listen on port 8007 and re-send all the data received to other participants.
-
-Demonstrates how to
- * plug in eventlet into a twisted application (join_reactor)
- * how to use SpawnFactory to start a new greenlet for each new request.
-"""
-from eventlet.twistedutil import join_reactor
-from eventlet.twistedutil.protocol import SpawnFactory
-from eventlet.twistedutil.protocols.basic import LineOnlyReceiverTransport
-
-class Chat:
-
-    def __init__(self):
-        self.participants = []
-
-    def handler(self, conn):
-        peer = conn.getPeer()
-        print('new connection from %s' % (peer, ))
-        conn.write("Welcome! There're %s participants already\n" % (len(self.participants)))
-        self.participants.append(conn)
-        try:
-            for line in conn:
-                if line:
-                    print('received from %s: %s' % (peer, line))
-                    for buddy in self.participants:
-                        if buddy is not conn:
-                            buddy.sendline('from %s: %s' % (peer, line))
-        except Exception as ex:
-            print(peer, ex)
-        else:
-            print(peer, 'connection done')
-        finally:
-            conn.loseConnection()
-            self.participants.remove(conn)
-
-print(__doc__)
-chat = Chat()
-from twisted.internet import reactor
-reactor.listenTCP(8007, SpawnFactory(chat.handler, LineOnlyReceiverTransport))
-reactor.run()
-
diff --git a/eventlet/examples/twisted/twisted_srvconnector.py b/eventlet/examples/twisted/twisted_srvconnector.py
deleted file mode 100644 (file)
index 97a408a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-from twisted.internet import reactor
-from twisted.names.srvconnect import SRVConnector
-from gnutls.interfaces.twisted import X509Credentials
-
-from eventlet.twistedutil.protocol import GreenClientCreator
-from eventlet.twistedutil.protocols.basic import LineOnlyReceiverTransport
-
-class NoisySRVConnector(SRVConnector):
-
-    def pickServer(self):
-        host, port = SRVConnector.pickServer(self)
-        print('Resolved _%s._%s.%s --> %s:%s' % (self.service, self.protocol, self.domain, host, port))
-        return host, port
-
-cred = X509Credentials(None, None)
-creator = GreenClientCreator(reactor, LineOnlyReceiverTransport)
-conn = creator.connectSRV('msrps', 'ag-projects.com',
-                          connectFuncName='connectTLS', connectFuncArgs=(cred,),
-                          ConnectorClass=NoisySRVConnector)
-
-request = """MSRP 49fh AUTH
-To-Path: msrps://alice@intra.example.com;tcp
-From-Path: msrps://alice.example.com:9892/98cjs;tcp
--------49fh$
-""".replace('\n', '\r\n')
-
-print('Sending:\n%s' % request)
-conn.write(request)
-print('Received:')
-for x in conn:
-    print(repr(x))
-    if '-------' in x:
-        break
diff --git a/eventlet/examples/twisted/twisted_xcap_proxy.py b/eventlet/examples/twisted/twisted_xcap_proxy.py
deleted file mode 100644 (file)
index 1ee5ee3..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-from twisted.internet.protocol import Factory
-from twisted.internet import reactor
-from twisted.protocols import basic
-
-from xcaplib.green import XCAPClient
-
-from eventlet.twistedutil import deferToGreenThread
-from eventlet.twistedutil import join_reactor
-
-class LineOnlyReceiver(basic.LineOnlyReceiver):
-
-    def lineReceived(self, line):
-        print('received: %r' % line)
-        if not line:
-            return
-        app, context, node = (line + ' ').split(' ', 3)
-        context = {'u' : 'users', 'g': 'global'}.get(context, context)
-        d = deferToGreenThread(client._get, app, node, globaltree=context=='global')
-        def callback(result):
-            self.transport.write(str(result))
-        def errback(error):
-            self.transport.write(error.getTraceback())
-        d.addCallback(callback)
-        d.addErrback(errback)
-
-class MyFactory(Factory):
-    protocol = LineOnlyReceiver
-
-client = XCAPClient('https://xcap.sipthor.net/xcap-root', 'alice@example.com', '123')
-reactor.listenTCP(8007, MyFactory())
-reactor.run()
index 06596f755e5dfc1b995da632ff07a25ff59d06dd..3dba2420dea1f4787d243bf0e37b2e84fa7d32ad 100644 (file)
@@ -82,17 +82,6 @@ def skip_unless(condition):
     return skipped_wrapper
 
 
-def requires_twisted(func):
-    """ Decorator that skips a test if Twisted is not present."""
-    def requirement(_f):
-        from eventlet.hubs import get_hub
-        try:
-            return 'Twisted' in type(get_hub()).__name__
-        except Exception:
-            return False
-    return skip_unless(requirement)(func)
-
-
 def using_pyevent(_f):
     from eventlet.hubs import get_hub
     return 'pyevent' in type(get_hub()).__module__
@@ -220,7 +209,8 @@ def verify_hub_empty():
     num_readers = len(hub.get_readers())
     num_writers = len(hub.get_writers())
     num_timers = hub.get_timers_count()
-    assert num_readers == 0 and num_writers == 0, "Readers: %s Writers: %s" % (num_readers, num_writers)
+    assert num_readers == 0 and num_writers == 0, "Readers: %s Writers: %s" % (
+        num_readers, num_writers)
 
 
 def find_command(command):
index 5bee32dcffdaeb1e85074cc62c48467d68d4a7da..f9636daa7d50d1b4218c93766976e90afa08fef4 100644 (file)
@@ -1,29 +1,24 @@
 import os
-import socket
 from unittest import TestCase, main
-import warnings
+
+from nose.tools import eq_
 
 import eventlet
-from eventlet import greenio, util, hubs, greenthread, spawn
+from eventlet import greenio, hubs, greenthread, spawn
+from eventlet.green import ssl
 from tests import skip_if_no_ssl
 
-warnings.simplefilter('ignore', DeprecationWarning)
-from eventlet import api
-warnings.simplefilter('default', DeprecationWarning)
-
 
 def check_hub():
     # Clear through the descriptor queue
-    api.sleep(0)
-    api.sleep(0)
+    eventlet.sleep(0)
+    eventlet.sleep(0)
     hub = hubs.get_hub()
     for nm in 'get_readers', 'get_writers':
         dct = getattr(hub, nm)()
         assert not dct, "hub.%s not empty: %s" % (nm, dct)
-    # Stop the runloop (unless it's twistedhub which does not support that)
-    if not getattr(hub, 'uses_twisted_reactor', None):
-        hub.abort(True)
-        assert not hub.running
+    hub.abort(True)
+    assert not hub.running
 
 
 class TestApi(TestCase):
@@ -50,14 +45,13 @@ class TestApi(TestCase):
                 listenfd.close()
 
         server = eventlet.listen(('0.0.0.0', 0))
-        api.spawn(accept_once, server)
+        eventlet.spawn_n(accept_once, server)
 
         client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
         fd = client.makefile('rb')
         client.close()
-        assert fd.readline() == b'hello\n'
-
-        assert fd.read() == b''
+        eq_(fd.readline(), b'hello\n')
+        eq_(fd.read(), b'')
         fd.close()
 
         check_hub()
@@ -74,14 +68,17 @@ class TestApi(TestCase):
                 greenio.shutdown_safe(listenfd)
                 listenfd.close()
 
-        server = api.ssl_listener(('0.0.0.0', 0),
-                                  self.certificate_file,
-                                  self.private_key_file)
-        api.spawn(accept_once, server)
+        server = eventlet.wrap_ssl(
+            eventlet.listen(('0.0.0.0', 0)),
+            self.private_key_file,
+            self.certificate_file,
+            server_side=True
+        )
+        eventlet.spawn_n(accept_once, server)
 
         raw_client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
-        client = util.wrap_ssl(raw_client)
-        fd = socket._fileobject(client, 'rb', 8192)
+        client = ssl.wrap_socket(raw_client)
+        fd = client.makefile('rb', 8192)
 
         assert fd.readline() == b'hello\r\n'
         try:
@@ -100,13 +97,13 @@ class TestApi(TestCase):
 
         def server(sock):
             client, addr = sock.accept()
-            api.sleep(0.1)
+            eventlet.sleep(0.1)
         server_evt = spawn(server, server_sock)
-        api.sleep(0)
+        eventlet.sleep(0)
         try:
             desc = eventlet.connect(('127.0.0.1', bound_port))
-            api.trampoline(desc, read=True, write=False, timeout=0.001)
-        except api.TimeoutError:
+            hubs.trampoline(desc, read=True, write=False, timeout=0.001)
+        except eventlet.TimeoutError:
             pass  # test passed
         else:
             assert False, "Didn't timeout"
@@ -128,8 +125,8 @@ class TestApi(TestCase):
         def go():
             desc = eventlet.connect(('127.0.0.1', bound_port))
             try:
-                api.trampoline(desc, read=True, timeout=0.1)
-            except api.TimeoutError:
+                hubs.trampoline(desc, read=True, timeout=0.1)
+            except eventlet.TimeoutError:
                 assert False, "Timed out"
 
             server.close()
@@ -138,21 +135,13 @@ class TestApi(TestCase):
 
         greenthread.spawn_after_local(0, go)
 
-        server_coro = api.spawn(client_closer, server)
+        server_coro = eventlet.spawn(client_closer, server)
         while not done[0]:
-            api.sleep(0)
-        api.kill(server_coro)
+            eventlet.sleep(0)
+        eventlet.kill(server_coro)
 
         check_hub()
 
-    def test_named(self):
-        named_foo = api.named('tests.api_test.Foo')
-        self.assertEqual(named_foo.__name__, "Foo")
-
-    def test_naming_missing_class(self):
-        self.assertRaises(
-            ImportError, api.named, 'this_name_should_hopefully_not_exist.Foo')
-
     def test_killing_dormant(self):
         DELAY = 0.1
         state = []
@@ -160,33 +149,33 @@ class TestApi(TestCase):
         def test():
             try:
                 state.append('start')
-                api.sleep(DELAY)
+                eventlet.sleep(DELAY)
             except:
                 state.append('except')
                 # catching GreenletExit
                 pass
             # when switching to hub, hub makes itself the parent of this greenlet,
             # thus after the function's done, the control will go to the parent
-            api.sleep(0)
+            eventlet.sleep(0)
             state.append('finished')
 
-        g = api.spawn(test)
-        api.sleep(DELAY / 2)
+        g = eventlet.spawn(test)
+        eventlet.sleep(DELAY / 2)
         self.assertEqual(state, ['start'])
-        api.kill(g)
+        eventlet.kill(g)
         # will not get there, unless switching is explicitly scheduled by kill
         self.assertEqual(state, ['start', 'except'])
-        api.sleep(DELAY)
+        eventlet.sleep(DELAY)
         self.assertEqual(state, ['start', 'except', 'finished'])
 
     def test_nested_with_timeout(self):
         def func():
-            return api.with_timeout(0.2, api.sleep, 2, timeout_value=1)
+            return eventlet.with_timeout(0.2, eventlet.sleep, 2, timeout_value=1)
 
         try:
-            api.with_timeout(0.1, func)
-            self.fail(u'Expected api.TimeoutError')
-        except api.TimeoutError:
+            eventlet.with_timeout(0.1, func)
+            self.fail(u'Expected TimeoutError')
+        except eventlet.TimeoutError:
             pass
 
 
index f304d3fe5bcc9f2de75696474726741198524b06..6facffeec2f751e0e3115411fa1713b36a8a96f1 100644 (file)
@@ -14,15 +14,15 @@ class BackdoorTest(LimitedTestCase):
         client = socket.socket()
         client.connect(('localhost', listener.getsockname()[1]))
         f = client.makefile('rw')
-        assert b'Python' in f.readline()
+        assert 'Python' in f.readline()
         f.readline()  # build info
         f.readline()  # help info
-        assert b'InteractiveConsole' in f.readline()
-        self.assertEqual(b'>>> ', f.read(4))
-        f.write(b'print("hi")\n')
+        assert 'InteractiveConsole' in f.readline()
+        self.assertEqual('>>> ', f.read(4))
+        f.write('print("hi")\n')
         f.flush()
-        self.assertEqual(b'hi\n', f.readline())
-        self.assertEqual(b'>>> ', f.read(4))
+        self.assertEqual('hi\n', f.readline())
+        self.assertEqual('>>> ', f.read(4))
         f.close()
         client.close()
         serv.kill()
index 83726bc47de6d92f85c93b526c60e6e2d6f6b930..194699ca8756649c5bbd23c8428aa56c91d25f34 100644 (file)
@@ -125,8 +125,8 @@ class TestServe(LimitedTestCase):
 
         eventlet.spawn(eventlet.serve, server, handle)
         client = eventlet.wrap_ssl(eventlet.connect(('localhost', port)))
-        client.sendall("echo")
-        self.assertEqual("echo", client.recv(1024))
+        client.sendall(b"echo")
+        self.assertEqual(b"echo", client.recv(1024))
 
     def test_socket_reuse(self):
         lsock1 = eventlet.listen(('localhost', 0))
index 31179d19777174cb7a6847dba9ae4f6ccfb1ef59..9fc9ebc2005232b80486d368c3be116341cd6eb7 100644 (file)
@@ -453,7 +453,8 @@ class DBConnectionPool(DBTester):
 
 
 class DummyConnection(object):
-    pass
+    def rollback(self):
+        pass
 
 
 class DummyDBModule(object):
@@ -505,54 +506,69 @@ class RawConnectionPool(DBConnectionPool):
             **self._auth)
 
 
-class TestRawConnectionPool(TestCase):
-    def test_issue_125(self):
-        # pool = self.create_pool(min_size=3, max_size=5)
-        pool = db_pool.RawConnectionPool(
-            DummyDBModule(),
-            dsn="dbname=test user=jessica port=5433",
-            min_size=3, max_size=5)
-        conn = pool.get()
-        pool.put(conn)
+def test_raw_pool_issue_125():
+    # pool = self.create_pool(min_size=3, max_size=5)
+    pool = db_pool.RawConnectionPool(
+        DummyDBModule(),
+        dsn="dbname=test user=jessica port=5433",
+        min_size=3, max_size=5)
+    conn = pool.get()
+    pool.put(conn)
 
-    def test_custom_cleanup_ok(self):
-        cleanup_mock = mock.Mock()
-        pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup_mock)
-        conn = pool.get()
-        pool.put(conn)
-        assert cleanup_mock.call_count == 1
 
-        with pool.item() as conn:
-            pass
-        assert cleanup_mock.call_count == 2
+def test_raw_pool_custom_cleanup_ok():
+    cleanup_mock = mock.Mock()
+    pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup_mock)
+    conn = pool.get()
+    pool.put(conn)
+    assert cleanup_mock.call_count == 1
 
-    def test_custom_cleanup_arg_error(self):
-        cleanup_mock = mock.Mock(side_effect=NotImplementedError)
-        pool = db_pool.RawConnectionPool(DummyDBModule())
-        conn = pool.get()
-        pool.put(conn, cleanup=cleanup_mock)
-        assert cleanup_mock.call_count == 1
+    with pool.item() as conn:
+        pass
+    assert cleanup_mock.call_count == 2
 
-        with pool.item(cleanup=cleanup_mock):
-            pass
-        assert cleanup_mock.call_count == 2
 
-    def test_custom_cleanup_fatal(self):
-        state = [0]
+def test_raw_pool_custom_cleanup_arg_error():
+    cleanup_mock = mock.Mock(side_effect=NotImplementedError)
+    pool = db_pool.RawConnectionPool(DummyDBModule())
+    conn = pool.get()
+    pool.put(conn, cleanup=cleanup_mock)
+    assert cleanup_mock.call_count == 1
 
-        def cleanup(conn):
-            state[0] += 1
-            raise KeyboardInterrupt
+    with pool.item(cleanup=cleanup_mock):
+        pass
+    assert cleanup_mock.call_count == 2
 
-        pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup)
-        conn = pool.get()
-        try:
-            pool.put(conn)
-        except KeyboardInterrupt:
-            pass
-        else:
-            assert False, 'Expected KeyboardInterrupt'
-        assert state[0] == 1
+
+def test_raw_pool_custom_cleanup_fatal():
+    state = [0]
+
+    def cleanup(conn):
+        state[0] += 1
+        raise KeyboardInterrupt
+
+    pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup)
+    conn = pool.get()
+    try:
+        pool.put(conn)
+    except KeyboardInterrupt:
+        pass
+    else:
+        assert False, 'Expected KeyboardInterrupt'
+    assert state[0] == 1
+
+
+def test_raw_pool_clear_update_current_size():
+    # https://github.com/eventlet/eventlet/issues/139
+    # BaseConnectionPool.clear does not update .current_size.
+    # That leads to situation when new connections could not be created.
+    pool = db_pool.RawConnectionPool(DummyDBModule())
+    pool.get().close()
+    assert pool.current_size == 1
+    assert len(pool.free_items) == 1
+    pool.clear()
+    assert pool.current_size == 0
+    assert len(pool.free_items) == 0
 
 
 get_auth = get_database_auth
index 67617f43f91080954b74098ce6bfd9c0a104cd1f..8710dc034b2e02e1e0a9949e7614123030e4eaee 100644 (file)
@@ -2,7 +2,7 @@ import sys
 from unittest import TestCase
 
 from eventlet import debug
-from eventlet.support import six
+from eventlet.support import capture_stderr, six
 from tests import LimitedTestCase, main
 import eventlet
 
@@ -107,10 +107,7 @@ class TestDebug(LimitedTestCase):
             s.recv(1)
             {}[1]  # keyerror
 
-        fake = six.StringIO()
-        orig = sys.stderr
-        sys.stderr = fake
-        try:
+        with capture_stderr() as fake:
             gt = eventlet.spawn(hurl, client_2)
             eventlet.sleep(0)
             client.send(b' ')
@@ -118,10 +115,8 @@ class TestDebug(LimitedTestCase):
             # allow the "hurl" greenlet to trigger the KeyError
             # not sure why the extra context switch is needed
             eventlet.sleep(0)
-        finally:
-            sys.stderr = orig
-            self.assertRaises(KeyError, gt.wait)
-            debug.hub_exceptions(False)
+        self.assertRaises(KeyError, gt.wait)
+        debug.hub_exceptions(False)
         # look for the KeyError exception in the traceback
         assert 'KeyError: 1' in fake.getvalue(), "Traceback not in:\n" + fake.getvalue()
 
index 4888e9d635788d4ebd1dd691fd555439d2990547..f8931c1dc66b8e91e2910f56a6d1f0f0f1b89ffd 100644 (file)
@@ -1,4 +1,5 @@
 import os
+from eventlet.support import six
 from tests.patcher_test import ProcessBase
 from tests import skip_with_pyevent
 
index 5ef177c1575ea1392f97dfc70fc76a8f4793286b..f15883fd34f0897f3b161203b668c5b59b5c2ffe 100644 (file)
@@ -8,6 +8,7 @@ import eventlet
 import os
 import sys
 import signal
+from eventlet.support import bytes_to_str, six
 mydir = %r
 signal_file = os.path.join(mydir, "output.txt")
 pid = os.fork()
@@ -30,18 +31,18 @@ if (pid != 0):
         break
       except (IOError, IndexError):
         eventlet.sleep(0.1)
-    print('result {0}'.format(result))
+    print('result {0}'.format(bytes_to_str(result)))
   finally:
     os.kill(pid, signal.SIGTERM)
 else:
   try:
     s = eventlet.listen(('', 0))
     fd = open(signal_file, "wb")
-    fd.write(str(s.getsockname()[1]))
-    fd.write("\\n")
+    fd.write(six.b(str(s.getsockname()[1])))
+    fd.write(b"\\n")
     fd.flush()
     s.accept()
-    fd.write("done")
+    fd.write(b"done")
     fd.flush()
   finally:
     fd.close()
index c56b946afbcb4e707eebdcb7a9df4dc8957775ad..4b375bae13dc52c59a53685545d3b8b25e1530e1 100644 (file)
@@ -9,10 +9,12 @@ import socket as _orig_sock
 import sys
 import tempfile
 
+from nose.tools import eq_
+
 from eventlet import event, greenio, debug
 from eventlet.hubs import get_hub
 from eventlet.green import select, socket, time, ssl
-from eventlet.support import get_errno, six
+from eventlet.support import capture_stderr, get_errno, six
 from tests import (
     LimitedTestCase, main,
     skip_with_pyevent, skipped, skip_if, skip_on_windows,
@@ -102,7 +104,7 @@ class TestGreenSocket(LimitedTestCase):
         s.settimeout(0.1)
         gs = greenio.GreenSocket(s)
         e = gs.connect_ex(('192.0.2.1', 80))
-        if not e in (errno.EHOSTUNREACH, errno.ENETUNREACH):
+        if e not in (errno.EHOSTUNREACH, errno.ENETUNREACH):
             self.assertEqual(e, errno.EAGAIN)
 
     def test_recv_timeout(self):
@@ -271,7 +273,7 @@ class TestGreenSocket(LimitedTestCase):
             # by closing the socket prior to using the made file
             try:
                 conn, addr = listener.accept()
-                fd = conn.makefile('w')
+                fd = conn.makefile('wb')
                 conn.close()
                 fd.write(b'hello\n')
                 fd.close()
@@ -285,7 +287,7 @@ class TestGreenSocket(LimitedTestCase):
             # by closing the made file and then sending a character
             try:
                 conn, addr = listener.accept()
-                fd = conn.makefile('w')
+                fd = conn.makefile('wb')
                 fd.write(b'hello')
                 fd.close()
                 conn.send(b'\n')
@@ -298,7 +300,7 @@ class TestGreenSocket(LimitedTestCase):
         def did_it_work(server):
             client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             client.connect(('127.0.0.1', server.getsockname()[1]))
-            fd = client.makefile()
+            fd = client.makefile('rb')
             client.close()
             assert fd.readline() == b'hello\n'
             assert fd.read() == b''
@@ -327,7 +329,7 @@ class TestGreenSocket(LimitedTestCase):
             # closing the file object should close everything
             try:
                 conn, addr = listener.accept()
-                conn = conn.makefile('w')
+                conn = conn.makefile('wb')
                 conn.write(b'hello\n')
                 conn.close()
                 gc.collect()
@@ -342,7 +344,7 @@ class TestGreenSocket(LimitedTestCase):
         killer = eventlet.spawn(accept_once, server)
         client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         client.connect(('127.0.0.1', server.getsockname()[1]))
-        fd = client.makefile()
+        fd = client.makefile('rb')
         client.close()
         assert fd.read() == b'hello\n'
         assert fd.read() == b''
@@ -450,7 +452,7 @@ class TestGreenSocket(LimitedTestCase):
 
         def sender(evt):
             s2, addr = server.accept()
-            wrap_wfile = s2.makefile('w')
+            wrap_wfile = s2.makefile('wb')
 
             eventlet.sleep(0.02)
             wrap_wfile.write(b'hi')
@@ -601,7 +603,7 @@ class TestGreenSocket(LimitedTestCase):
     def test_sockopt_interface(self):
         sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0
-        assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) == '\000'
+        assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) == b'\000'
         sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
 
     def test_socketpair_select(self):
@@ -611,6 +613,38 @@ class TestGreenSocket(LimitedTestCase):
         assert select.select([], [s1], [], 0) == ([], [s1], [])
 
 
+def test_get_fileno_of_a_socket_works():
+    class DummySocket(object):
+        def fileno(self):
+            return 123
+    assert select.get_fileno(DummySocket()) == 123
+
+
+def test_get_fileno_of_an_int_works():
+    assert select.get_fileno(123) == 123
+
+
+def test_get_fileno_of_wrong_type_fails():
+    try:
+        select.get_fileno('foo')
+    except TypeError as ex:
+        assert str(ex) == 'Expected int or long, got <type \'str\'>'
+    else:
+        assert False, 'Expected TypeError not raised'
+
+
+def test_get_fileno_of_a_socket_with_fileno_returning_wrong_type_fails():
+    class DummySocket(object):
+        def fileno(self):
+            return 'foo'
+    try:
+        select.get_fileno(DummySocket())
+    except TypeError as ex:
+        assert str(ex) == 'Expected int or long, got <type \'str\'>'
+    else:
+        assert False, 'Expected TypeError not raised'
+
+
 class TestGreenPipe(LimitedTestCase):
     @skip_on_windows
     def setUp(self):
@@ -627,7 +661,7 @@ class TestGreenPipe(LimitedTestCase):
         wf = greenio.GreenPipe(w, 'w', 0)
 
         def sender(f, content):
-            for ch in content:
+            for ch in map(six.int2byte, six.iterbytes(content)):
                 eventlet.sleep(0.0001)
                 f.write(ch)
             f.close()
@@ -638,7 +672,7 @@ class TestGreenPipe(LimitedTestCase):
             line = rf.readline()
             eventlet.sleep(0.01)
             self.assertEqual(line, one_line)
-        self.assertEqual(rf.readline(), '')
+        self.assertEqual(rf.readline(), b'')
 
     def test_pipe_read(self):
         # ensure that 'readline' works properly on GreenPipes when data is not
@@ -663,10 +697,10 @@ class TestGreenPipe(LimitedTestCase):
         eventlet.sleep(0)
 
         line = r.readline()
-        self.assertEqual(line, 'line\n')
+        self.assertEqual(line, b'line\n')
 
         line = r.readline()
-        self.assertEqual(line, 'line\r\n')
+        self.assertEqual(line, b'line\r\n')
 
         gt.wait()
 
@@ -676,7 +710,7 @@ class TestGreenPipe(LimitedTestCase):
         r = greenio.GreenPipe(r)
         w = greenio.GreenPipe(w, 'w')
 
-        large_message = b"".join([1024 * chr(i) for i in range(65)])
+        large_message = b"".join([1024 * six.int2byte(i) for i in range(65)])
 
         def writer():
             w.write(large_message)
@@ -895,5 +929,21 @@ def test_set_nonblocking():
     assert new_flags == (orig_flags | os.O_NONBLOCK)
 
 
+def test_socket_del_fails_gracefully_when_not_fully_initialized():
+    # Regression introduced in da87716714689894f23d0db7b003f26d97031e83, reported in:
+    # * GH #137 https://github.com/eventlet/eventlet/issues/137
+    # * https://bugs.launchpad.net/oslo.messaging/+bug/1369999
+
+    class SocketSubclass(socket.socket):
+
+        def __init__(self):
+            pass
+
+    with capture_stderr() as err:
+        SocketSubclass()
+
+    assert err.getvalue() == ''
+
+
 if __name__ == '__main__':
     main()
index afa1d5c707afa09e0e1a52094987d389da75c96d..f1012a998109bb783f563c4c34acb4bd23b68a92 100644 (file)
@@ -209,9 +209,6 @@ class TestExceptionInGreenthread(LimitedTestCase):
 class TestHubSelection(LimitedTestCase):
 
     def test_explicit_hub(self):
-        if getattr(hubs.get_hub(), 'uses_twisted_reactor', None):
-            # doesn't work with twisted
-            return
         oldhub = hubs.get_hub()
         try:
             hubs.use_hub(Foo)
index cb3390b81257c5756b697f96cb23e9ca08ffed96..9117b1010d35cf1107c11f7202aee46ee2bbe5ca 100644 (file)
@@ -6,27 +6,27 @@ import os
 __test__ = False
 _proc_status = '/proc/%d/status' % os.getpid()
 
-_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
-          'KB': 1024.0, 'MB': 1024.0*1024.0}
+_scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0,
+          'KB': 1024.0, 'MB': 1024.0 * 1024.0}
 
 
 def _VmB(VmKey):
     '''Private.
     '''
     global _proc_status, _scale
-     # get pseudo file  /proc/<pid>/status
+    # get pseudo file  /proc/<pid>/status
     try:
         t = open(_proc_status)
         v = t.read()
         t.close()
     except:
         return 0.0  # non-Linux?
-     # get VmKey line e.g. 'VmRSS:  9999  kB\n ...'
+    # get VmKey line e.g. 'VmRSS:  9999  kB\n ...'
     i = v.index(VmKey)
     v = v[i:].split(None, 3)  # whitespace
     if len(v) < 3:
         return 0.0  # invalid format?
-     # convert Vm value to bytes
+    # convert Vm value to bytes
     return float(v[1]) * _scale[v[2]]
 
 
index 947c0db66349bef43ccc8e1d6b89ce682070865c..7bb567ac3149ed4640a8e64c1545d5443f063e62 100644 (file)
@@ -306,7 +306,7 @@ def _set_signature(mock, original, instance=False):
     src = """def %s(*args, **kwargs):
     _checksig_(*args, **kwargs)
     return mock(*args, **kwargs)""" % name
-    exec (src, context)
+    exec(src, context)
     funcopy = context[name]
     _setup_func(funcopy, mock)
     return funcopy
@@ -336,7 +336,7 @@ def _setup_func(funcopy, mock):
         funcopy.mock_calls = _CallList()
         mock.reset_mock()
         ret = funcopy.return_value
-        if _is_instance_mock(ret) and not ret is mock:
+        if _is_instance_mock(ret) and ret is not mock:
             ret.reset_mock()
 
     funcopy.called = False
@@ -461,8 +461,8 @@ def _check_and_set_parent(parent, value, name, new_name):
     if not _is_instance_mock(value):
         return False
     if ((value._mock_name or value._mock_new_name) or
-        (value._mock_parent is not None) or
-        (value._mock_new_parent is not None)):
+            (value._mock_parent is not None) or
+            (value._mock_new_parent is not None)):
         return False
 
     _parent = parent
@@ -502,10 +502,10 @@ class NonCallableMock(Base):
         return instance
 
     def __init__(
-            self, spec=None, wraps=None, name=None, spec_set=None,
-            parent=None, _spec_state=None, _new_name='', _new_parent=None,
-            **kwargs
-        ):
+        self, spec=None, wraps=None, name=None, spec_set=None,
+        parent=None, _spec_state=None, _new_name='', _new_parent=None,
+        **kwargs
+    ):
         if _new_parent is None:
             _new_parent = parent
 
@@ -768,8 +768,8 @@ class NonCallableMock(Base):
             # property setters go through here
             return object.__setattr__(self, name, value)
         elif (self._spec_set and self._mock_methods is not None and
-            name not in self._mock_methods and
-            name not in self.__dict__):
+              name not in self._mock_methods and
+              name not in self.__dict__):
             raise AttributeError("Mock object has no attribute '%s'" % name)
         elif name in _unsupported_magics:
             msg = 'Attempting to set unsupported magic method %r.' % name
@@ -1018,7 +1018,7 @@ class CallableMixin(Base):
                 ret_val = self.return_value
 
         if (self._mock_wraps is not None and
-             self._mock_return_value is DEFAULT):
+                self._mock_return_value is DEFAULT):
             return self._mock_wraps(*args, **kwargs)
         if ret_val is DEFAULT:
             ret_val = self.return_value
@@ -1110,9 +1110,9 @@ class _patch(object):
     _active_patches = set()
 
     def __init__(
-            self, getter, attribute, new, spec, create,
-            spec_set, autospec, new_callable, kwargs
-        ):
+        self, getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    ):
         if new_callable is not None:
             if new is not DEFAULT:
                 raise ValueError(
@@ -1193,7 +1193,7 @@ class _patch(object):
                     return func(*args, **keywargs)
                 except:
                     if (patching not in entered_patchers and
-                        _is_started(patching)):
+                            _is_started(patching)):
                         # the patcher may have been started, but an exception
                         # raised whilst entering one of its additional_patchers
                         entered_patchers.append(patching)
@@ -1252,7 +1252,7 @@ class _patch(object):
         if spec is not None and autospec is not None:
             raise TypeError("Can't specify spec and autospec")
         if ((spec is not None or autospec is not None) and
-            spec_set not in (True, None)):
+                spec_set not in (True, None)):
             raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
 
         original, local = self.get_original()
@@ -1301,7 +1301,7 @@ class _patch(object):
 
             # add a name to mocks
             if (isinstance(Klass, type) and
-                issubclass(Klass, NonCallableMock) and self.attribute):
+                    issubclass(Klass, NonCallableMock) and self.attribute):
                 _kwargs['name'] = self.attribute
 
             _kwargs.update(kwargs)
@@ -1314,7 +1314,7 @@ class _patch(object):
                 if spec_set is not None:
                     this_spec = spec_set
                 if (not _is_list(this_spec) and not
-                    _instance_callable(this_spec)):
+                        _instance_callable(this_spec)):
                     Klass = NonCallableMagicMock
 
                 _kwargs.pop('name')
@@ -1402,10 +1402,10 @@ def _get_target(target):
 
 
 def _patch_object(
-        target, attribute, new=DEFAULT, spec=None,
-        create=False, spec_set=None, autospec=None,
-        new_callable=None, **kwargs
-    ):
+    target, attribute, new=DEFAULT, spec=None,
+    create=False, spec_set=None, autospec=None,
+    new_callable=None, **kwargs
+):
     """
     patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                  spec_set=None, autospec=None, new_callable=None, **kwargs)
@@ -1479,9 +1479,9 @@ def _patch_multiple(target, spec=None, create=False, spec_set=None,
 
 
 def patch(
-        target, new=DEFAULT, spec=None, create=False,
-        spec_set=None, autospec=None, new_callable=None, **kwargs
-    ):
+    target, new=DEFAULT, spec=None, create=False,
+    spec_set=None, autospec=None, new_callable=None, **kwargs
+):
     """
     `patch` acts as a function decorator, class decorator or a context
     manager. Inside the body of the function or with statement, the `target`
@@ -1611,7 +1611,7 @@ class _patch_dict(object):
         for attr in dir(klass):
             attr_value = getattr(klass, attr)
             if (attr.startswith(patch.TEST_PREFIX) and
-                 hasattr(attr_value, "__call__")):
+                    hasattr(attr_value, "__call__")):
                 decorator = _patch_dict(self.in_dict, self.values, self.clear)
                 decorated = decorator(attr_value)
                 setattr(klass, attr, decorated)
index 173bad365b85e33496e1fa54c69283dbd3336d0e..e5a87d3e739cb79597fe60a1c8255515f69de4c2 100644 (file)
@@ -42,8 +42,6 @@ def mysql_requirement(_f):
 
 class TestMySQLdb(LimitedTestCase):
     def setUp(self):
-        super(TestMySQLdb, self).setUp()
-
         self._auth = get_database_auth()['MySQLdb']
         self.create_db()
         self.connection = None
@@ -56,6 +54,8 @@ class TestMySQLdb(LimitedTestCase):
         self.connection.commit()
         cursor.close()
 
+        super(TestMySQLdb, self).setUp()
+
     def tearDown(self):
         if self.connection:
             self.connection.close()
index 2d9c0e7e5deb66d92f21deffa12f20e852a23a6a..a76178e7520170c1e1a1c5c26efd578952077ec2 100644 (file)
@@ -1,12 +1,12 @@
 from __future__ import print_function
-import MySQLdb as m
 from eventlet import patcher
-from eventlet.green import MySQLdb as gm
 
 # no standard tests in this file, ignore
 __test__ = False
 
 if __name__ == '__main__':
+    import MySQLdb as m
+    from eventlet.green import MySQLdb as gm
     patcher.monkey_patch(all=True, MySQLdb=True)
     print("mysqltest {0}".format(",".join(sorted(patcher.already_patched.keys()))))
     print("connect {0}".format(m.connect == gm.connect))
index 53455d3099c4a35e441bf557240cd4907bf7910a..18bd5e321c3e0506bb88db0e47609a6831eab52c 100644 (file)
@@ -39,7 +39,8 @@ def parse_unittest_output(s):
     num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1))
     ok = re.search('^OK$', s, re.M)
     error, fail, timeout = 0, 0, 0
-    failed_match = re.search(r'^FAILED \((?:failures=(?P<f>\d+))?,? ?(?:errors=(?P<e>\d+))?\)$', s, re.M)
+    failed_match = re.search(
+        r'^FAILED \((?:failures=(?P<f>\d+))?,? ?(?:errors=(?P<e>\d+))?\)$', s, re.M)
     ok_match = re.search('^OK$', s, re.M)
     if failed_match:
         assert not ok_match, (ok_match, s)
index c575a80f9389c96e5c807a0e347c384295018c62..5c9076f4459192e8cd7b45ed7d9457a54473de82 100644 (file)
@@ -3,12 +3,16 @@ import shutil
 import sys
 import tempfile
 
+from eventlet.support import six
 from tests import LimitedTestCase, main, run_python, skip_with_pyevent
 
 
 base_module_contents = """
 import socket
-import urllib
+try:
+    import urllib.request as urllib
+except ImportError:
+    import urllib
 print("base {0} {1}".format(socket, urllib))
 """
 
@@ -45,14 +49,18 @@ class ProcessBase(LimitedTestCase):
         filename = os.path.join(self.tempdir, name)
         if not filename.endswith('.py'):
             filename = filename + '.py'
-        fd = open(filename, "wb")
-        fd.write(contents)
-        fd.close()
+        with open(filename, "w") as fd:
+            fd.write(contents)
 
     def launch_subprocess(self, filename):
         path = os.path.join(self.tempdir, filename)
         output = run_python(path)
-        lines = output.split("\n")
+        if six.PY3:
+            output = output.decode('utf-8')
+            separator = '\n'
+        else:
+            separator = b'\n'
+        lines = output.split(separator)
         return output, lines
 
     def run_script(self, contents, modname=None):
@@ -98,7 +106,10 @@ class MonkeyPatch(ProcessBase):
 from eventlet import patcher
 patcher.monkey_patch()
 import socket
-import urllib
+try:
+    import urllib.request as urllib
+except ImportError:
+    import urllib
 print("newmod {0} {1}".format(socket.socket, urllib.socket.socket))
 """
         self.write_to_tempfile("newmod", new_mod)
@@ -481,5 +492,10 @@ t2.join()
         self.assertEqual(lines[1], "True", lines[1])
 
 
+def test_importlib_lock():
+    output = run_python('tests/patcher_test_importlib_lock.py')
+    assert output.rstrip() == b'ok'
+
+
 if __name__ == '__main__':
     main()
diff --git a/eventlet/tests/patcher_test_importlib_lock.py b/eventlet/tests/patcher_test_importlib_lock.py
new file mode 100644 (file)
index 0000000..8f7cea7
--- /dev/null
@@ -0,0 +1,30 @@
+from __future__ import print_function
+
+import sys
+
+import eventlet
+
+
+# no standard tests in this file, ignore
+__test__ = False
+
+
+def do_import():
+    import encodings.idna
+
+
+if __name__ == '__main__':
+    eventlet.monkey_patch()
+    threading = eventlet.patcher.original('threading')
+
+    sys.modules.pop('encodings.idna', None)
+
+    # call "import encodings.idna" in a new thread
+    thread = threading.Thread(target=do_import)
+    thread.start()
+
+    # call "import encodings.idna" in the main thread
+    do_import()
+
+    thread.join()
+    print('ok')
diff --git a/eventlet/tests/processes_test.py b/eventlet/tests/processes_test.py
deleted file mode 100644 (file)
index 889c824..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-import sys
-import warnings
-from tests import LimitedTestCase, main, skip_on_windows
-
-warnings.simplefilter('ignore', DeprecationWarning)
-from eventlet import processes, api
-warnings.simplefilter('default', DeprecationWarning)
-
-
-class TestEchoPool(LimitedTestCase):
-    def setUp(self):
-        super(TestEchoPool, self).setUp()
-        self.pool = processes.ProcessPool('echo', ["hello"])
-
-    @skip_on_windows
-    def test_echo(self):
-        result = None
-
-        proc = self.pool.get()
-        try:
-            result = proc.read()
-        finally:
-            self.pool.put(proc)
-        self.assertEqual(result, 'hello\n')
-
-    @skip_on_windows
-    def test_read_eof(self):
-        proc = self.pool.get()
-        try:
-            proc.read()
-            self.assertRaises(processes.DeadProcess, proc.read)
-        finally:
-            self.pool.put(proc)
-
-    @skip_on_windows
-    def test_empty_echo(self):
-        p = processes.Process('echo', ['-n'])
-        self.assertEqual('', p.read())
-        self.assertRaises(processes.DeadProcess, p.read)
-
-
-class TestCatPool(LimitedTestCase):
-    def setUp(self):
-        super(TestCatPool, self).setUp()
-        api.sleep(0)
-        self.pool = processes.ProcessPool('cat')
-
-    @skip_on_windows
-    def test_cat(self):
-        result = None
-
-        proc = self.pool.get()
-        try:
-            proc.write('goodbye')
-            proc.close_stdin()
-            result = proc.read()
-        finally:
-            self.pool.put(proc)
-
-        self.assertEqual(result, 'goodbye')
-
-    @skip_on_windows
-    def test_write_to_dead(self):
-        result = None
-
-        proc = self.pool.get()
-        try:
-            proc.write('goodbye')
-            proc.close_stdin()
-            result = proc.read()
-            self.assertRaises(processes.DeadProcess, proc.write, 'foo')
-        finally:
-            self.pool.put(proc)
-
-    @skip_on_windows
-    def test_close(self):
-        result = None
-
-        proc = self.pool.get()
-        try:
-            proc.write('hello')
-            proc.close()
-            self.assertRaises(processes.DeadProcess, proc.write, 'goodbye')
-        finally:
-            self.pool.put(proc)
-
-
-class TestDyingProcessesLeavePool(LimitedTestCase):
-    def setUp(self):
-        super(TestDyingProcessesLeavePool, self).setUp()
-        self.pool = processes.ProcessPool('echo', ['hello'], max_size=1)
-
-    @skip_on_windows
-    def test_dead_process_not_inserted_into_pool(self):
-        proc = self.pool.get()
-        try:
-            try:
-                result = proc.read()
-                self.assertEqual(result, 'hello\n')
-                result = proc.read()
-            except processes.DeadProcess:
-                pass
-        finally:
-            self.pool.put(proc)
-        proc2 = self.pool.get()
-        assert proc is not proc2
-
-
-if __name__ == '__main__':
-    main()
index b5522a0bdf123eb92e9616dbb17406521f846171..ecbe85b1a4006a63e769759e6410d4c29eb96b37 100644 (file)
@@ -218,7 +218,8 @@ class TestQueue(LimitedTestCase):
 
         self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world'], events)
         eventlet.sleep(0)
-        self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world', 'sent world'], events)
+        self.assertEqual(
+            ['waiting', 'sending hello', 'hello', 'sending world', 'world', 'sent world'], events)
 
     def test_channel_waiters(self):
         c = eventlet.Queue(0)
diff --git a/eventlet/tests/socket_test.py b/eventlet/tests/socket_test.py
new file mode 100644 (file)
index 0000000..60881f7
--- /dev/null
@@ -0,0 +1,8 @@
+from eventlet.green import socket
+
+
+def test_create_connection_error():
+    try:
+        socket.create_connection(('192.0.2.1', 80), timeout=0.1)
+    except (IOError, OSError):
+        pass
index 73b9b431116cae61630044d74dc1d2cf3784047f..621ff4721041bc559bed7f51f6438f50ecb5bb10 100644 (file)
@@ -3,9 +3,9 @@ import warnings
 from unittest import main
 
 import eventlet
-from eventlet import util, greenio
+from eventlet import greenio
 try:
-    from eventlet.green.socket import ssl
+    from eventlet.green import ssl
 except ImportError:
     pass
 from tests import (
@@ -15,8 +15,8 @@ from tests import (
 
 
 def listen_ssl_socket(address=('127.0.0.1', 0)):
-    sock = util.wrap_ssl(socket.socket(), certificate_file,
-                         private_key_file, True)
+    sock = ssl.wrap_socket(
+        socket.socket(), private_key_file, certificate_file, server_side=True)
     sock.bind(address)
     sock.listen(50)
 
@@ -44,7 +44,8 @@ class SSLTest(LimitedTestCase):
 
         server_coro = eventlet.spawn(serve, sock)
 
-        client = util.wrap_ssl(eventlet.connect(('127.0.0.1', sock.getsockname()[1])))
+        client = ssl.wrap_socket(
+            eventlet.connect(('127.0.0.1', sock.getsockname()[1])))
         client.write(b'line 1\r\nline 2\r\n\r\n')
         self.assertEqual(client.read(8192), b'response')
         server_coro.wait()
@@ -64,7 +65,7 @@ class SSLTest(LimitedTestCase):
         server_coro = eventlet.spawn(serve, sock)
 
         raw_client = eventlet.connect(('127.0.0.1', sock.getsockname()[1]))
-        client = util.wrap_ssl(raw_client)
+        client = ssl.wrap_socket(raw_client)
         client.write(b'X')
         greenio.shutdown_safe(client)
         client.close()
@@ -79,7 +80,7 @@ class SSLTest(LimitedTestCase):
         server_coro = eventlet.spawn(serve, sock)
 
         raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        ssl_client = util.wrap_ssl(raw_client)
+        ssl_client = ssl.wrap_socket(raw_client)
         ssl_client.connect(('127.0.0.1', sock.getsockname()[1]))
         ssl_client.write(b'abc')
         greenio.shutdown_safe(ssl_client)
@@ -91,8 +92,8 @@ class SSLTest(LimitedTestCase):
         def serve():
             sock, addr = listener.accept()
             self.assertEqual(sock.recv(6), b'before')
-            sock_ssl = util.wrap_ssl(sock, certificate_file, private_key_file,
-                                     server_side=True)
+            sock_ssl = ssl.wrap_socket(sock, private_key_file, certificate_file,
+                                       server_side=True)
             sock_ssl.do_handshake()
             self.assertEqual(sock_ssl.read(6), b'during')
             sock2 = sock_ssl.unwrap()
@@ -103,7 +104,7 @@ class SSLTest(LimitedTestCase):
         server_coro = eventlet.spawn(serve)
         client = eventlet.connect((listener.getsockname()))
         client.send(b'before')
-        client_ssl = util.wrap_ssl(client)
+        client_ssl = ssl.wrap_socket(client)
         client_ssl.do_handshake()
         client_ssl.write(b'during')
         client2 = client_ssl.unwrap()
@@ -142,7 +143,7 @@ class SSLTest(LimitedTestCase):
 
         client_sock = eventlet.connect(server_sock.getsockname())
         client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFFER_SIZE)
-        client = util.wrap_ssl(client_sock)
+        client = ssl.wrap_socket(client_sock)
         client.write(b'request')
         self.assertEqual(client.read(8), b'response')
         stage_1.send()
@@ -159,7 +160,8 @@ class SSLTest(LimitedTestCase):
             sock.close()
         listener = listen_ssl_socket(('', 0))
         eventlet.spawn(serve, listener)
-        client = ssl(eventlet.connect(('localhost', listener.getsockname()[1])))
+        client = ssl.wrap_socket(
+            eventlet.connect(('localhost', listener.getsockname()[1])))
         self.assertEqual(client.read(1024), b'content')
         self.assertEqual(client.read(1024), b'')
 
@@ -176,7 +178,7 @@ class SSLTest(LimitedTestCase):
 
         listener = listen_ssl_socket(('', 0))
         eventlet.spawn(serve, listener)
-        ssl(eventlet.connect(('localhost', listener.getsockname()[1])))
+        ssl.wrap_socket(eventlet.connect(('localhost', listener.getsockname()[1])))
 
 if __name__ == '__main__':
     main()
index f3202d7cf42b7d5c6c1187d91c53de23f472f0ce..3d05d1e6d6eb710a012127ca474de2d63559deb4 100644 (file)
@@ -1,6 +1,13 @@
-""" Convenience module for running standard library tests with nose.  The standard tests are not especially homogeneous, but they mostly expose a test_main method that does the work of selecting which tests to run based on what is supported by the platform.  On its own, Nose would run all possible tests and many would fail; therefore we collect all of the test_main methods here in one module and Nose can run it.  Hopefully in the future the standard tests get rewritten to be more nosey.
+""" Convenience module for running standard library tests with nose.  The standard
+tests are not especially homogeneous, but they mostly expose a test_main method that
+does the work of selecting which tests to run based on what is supported by the
+platform.  On its own, Nose would run all possible tests and many would fail; therefore
+we collect all of the test_main methods here in one module and Nose can run it.
 
-Many of these tests make connections to external servers, and all.py tries to skip these tests rather than failing them, so you can get some work done on a plane.
+Hopefully in the future the standard tests get rewritten to be more nosey.
+
+Many of these tests make connections to external servers, and all.py tries to skip these
+tests rather than failing them, so you can get some work done on a plane.
 """
 
 from eventlet import debug
index 02bd95c75a759ab4555eea5ba8ae9fb7b0a32482..9e99dc475ef0deaebb26103d8c748a7f73745484 100644 (file)
@@ -1,15 +1,15 @@
 """Test that BoundedSemaphore with a very high bound is as good as unbounded one"""
-from eventlet import coros
+from eventlet import semaphore
 from eventlet.green import thread
 
 
 def allocate_lock():
-    return coros.semaphore(1, 9999)
+    return semaphore.Semaphore(1, 9999)
 
 original_allocate_lock = thread.allocate_lock
 thread.allocate_lock = allocate_lock
 original_LockType = thread.LockType
-thread.LockType = coros.CappedSemaphore
+thread.LockType = semaphore.CappedSemaphore
 
 try:
     import os.path
index 08cd8a254ad6cb563407aa8de113bfe95b06b5e3..edbdd21f882c9479e77a266a607526821fd7815f 100644 (file)
@@ -9,8 +9,10 @@ patcher.inject(
     ('urllib2', urllib2))
 
 HandlerTests.test_file = patcher.patch_function(HandlerTests.test_file, ('socket', socket))
-HandlerTests.test_cookie_redirect = patcher.patch_function(HandlerTests.test_cookie_redirect, ('urllib2', urllib2))
-OpenerDirectorTests.test_badly_named_methods = patcher.patch_function(OpenerDirectorTests.test_badly_named_methods, ('urllib2', urllib2))
+HandlerTests.test_cookie_redirect = patcher.patch_function(
+    HandlerTests.test_cookie_redirect, ('urllib2', urllib2))
+OpenerDirectorTests.test_badly_named_methods = patcher.patch_function(
+    OpenerDirectorTests.test_badly_named_methods, ('urllib2', urllib2))
 
 if __name__ == "__main__":
     test_main()
index 5528bc462c18e47b42a992eef806288b7aef3ea1..6964a742b85306172015789bbd5bacc6adb0de79 100644 (file)
@@ -12,13 +12,16 @@ def test_subprocess_wait():
     # In Python 3.3 subprocess.Popen.wait() method acquired `timeout`
     # argument.
     # RHEL backported it to their Python 2.6 package.
-    p = subprocess.Popen(
-        [sys.executable, "-c", "import time; time.sleep(0.5)"])
+    cmd = [sys.executable, "-c", "import time; time.sleep(0.5)"]
+    p = subprocess.Popen(cmd)
     ok = False
     t1 = time.time()
     try:
         p.wait(timeout=0.1)
-    except subprocess.TimeoutExpired:
+    except subprocess.TimeoutExpired as e:
+        str(e)  # make sure it doesn't throw
+        assert e.cmd == cmd
+        assert e.timeout == 0.1
         ok = True
     tdiff = time.time() - t1
     assert ok, 'did not raise subprocess.TimeoutExpired'
diff --git a/eventlet/tests/test__coros_queue.py b/eventlet/tests/test__coros_queue.py
deleted file mode 100644 (file)
index d4ee257..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-from tests import LimitedTestCase, silence_warnings
-from unittest import main
-import eventlet
-from eventlet import coros, spawn, sleep
-from eventlet.event import Event
-
-
-class TestQueue(LimitedTestCase):
-
-    @silence_warnings
-    def test_send_first(self):
-        q = coros.queue()
-        q.send('hi')
-        self.assertEqual(q.wait(), 'hi')
-
-    @silence_warnings
-    def test_send_exception_first(self):
-        q = coros.queue()
-        q.send(exc=RuntimeError())
-        self.assertRaises(RuntimeError, q.wait)
-
-    @silence_warnings
-    def test_send_last(self):
-        q = coros.queue()
-
-        def waiter(q):
-            timer = eventlet.Timeout(0.1)
-            self.assertEqual(q.wait(), 'hi2')
-            timer.cancel()
-
-        spawn(waiter, q)
-        sleep(0)
-        sleep(0)
-        q.send('hi2')
-
-    @silence_warnings
-    def test_max_size(self):
-        q = coros.queue(2)
-        results = []
-
-        def putter(q):
-            q.send('a')
-            results.append('a')
-            q.send('b')
-            results.append('b')
-            q.send('c')
-            results.append('c')
-
-        spawn(putter, q)
-        sleep(0)
-        self.assertEqual(results, ['a', 'b'])
-        self.assertEqual(q.wait(), 'a')
-        sleep(0)
-        self.assertEqual(results, ['a', 'b', 'c'])
-        self.assertEqual(q.wait(), 'b')
-        self.assertEqual(q.wait(), 'c')
-
-    @silence_warnings
-    def test_zero_max_size(self):
-        q = coros.queue(0)
-
-        def sender(evt, q):
-            q.send('hi')
-            evt.send('done')
-
-        def receiver(evt, q):
-            x = q.wait()
-            evt.send(x)
-
-        e1 = Event()
-        e2 = Event()
-
-        spawn(sender, e1, q)
-        sleep(0)
-        assert not e1.ready()
-        spawn(receiver, e2, q)
-        self.assertEqual(e2.wait(), 'hi')
-        self.assertEqual(e1.wait(), 'done')
-
-    @silence_warnings
-    def test_multiple_waiters(self):
-        # tests that multiple waiters get their results back
-        q = coros.queue()
-
-        sendings = ['1', '2', '3', '4']
-        gts = [eventlet.spawn(q.wait)
-               for x in sendings]
-
-        eventlet.sleep(0.01)  # get 'em all waiting
-
-        q.send(sendings[0])
-        q.send(sendings[1])
-        q.send(sendings[2])
-        q.send(sendings[3])
-        results = set()
-        for i, gt in enumerate(gts):
-            results.add(gt.wait())
-        self.assertEqual(results, set(sendings))
-
-    @silence_warnings
-    def test_waiters_that_cancel(self):
-        q = coros.queue()
-
-        def do_receive(q, evt):
-            eventlet.Timeout(0, RuntimeError())
-            try:
-                result = q.wait()
-                evt.send(result)
-            except RuntimeError:
-                evt.send('timed out')
-
-        evt = Event()
-        spawn(do_receive, q, evt)
-        self.assertEqual(evt.wait(), 'timed out')
-
-        q.send('hi')
-        self.assertEqual(q.wait(), 'hi')
-
-    @silence_warnings
-    def test_senders_that_die(self):
-        q = coros.queue()
-
-        def do_send(q):
-            q.send('sent')
-
-        spawn(do_send, q)
-        self.assertEqual(q.wait(), 'sent')
-
-    @silence_warnings
-    def test_two_waiters_one_dies(self):
-        def waiter(q, evt):
-            evt.send(q.wait())
-
-        def do_receive(q, evt):
-            eventlet.Timeout(0, RuntimeError())
-            try:
-                result = q.wait()
-                evt.send(result)
-            except RuntimeError:
-                evt.send('timed out')
-
-        q = coros.queue()
-        dying_evt = Event()
-        waiting_evt = Event()
-        spawn(do_receive, q, dying_evt)
-        spawn(waiter, q, waiting_evt)
-        sleep(0)
-        q.send('hi')
-        self.assertEqual(dying_evt.wait(), 'timed out')
-        self.assertEqual(waiting_evt.wait(), 'hi')
-
-    @silence_warnings
-    def test_two_bogus_waiters(self):
-        def do_receive(q, evt):
-            eventlet.Timeout(0, RuntimeError())
-            try:
-                result = q.wait()
-                evt.send(result)
-            except RuntimeError:
-                evt.send('timed out')
-
-        q = coros.queue()
-        e1 = Event()
-        e2 = Event()
-        spawn(do_receive, q, e1)
-        spawn(do_receive, q, e2)
-        sleep(0)
-        q.send('sent')
-        self.assertEqual(e1.wait(), 'timed out')
-        self.assertEqual(e2.wait(), 'timed out')
-        self.assertEqual(q.wait(), 'sent')
-
-    @silence_warnings
-    def test_waiting(self):
-        def do_wait(q, evt):
-            result = q.wait()
-            evt.send(result)
-
-        q = coros.queue()
-        e1 = Event()
-        spawn(do_wait, q, e1)
-        sleep(0)
-        self.assertEqual(1, q.waiting())
-        q.send('hi')
-        sleep(0)
-        self.assertEqual(0, q.waiting())
-        self.assertEqual('hi', e1.wait())
-        self.assertEqual(0, q.waiting())
-
-
-class TestChannel(LimitedTestCase):
-
-    @silence_warnings
-    def test_send(self):
-        sleep(0.1)
-        channel = coros.queue(0)
-
-        events = []
-
-        def another_greenlet():
-            events.append(channel.wait())
-            events.append(channel.wait())
-
-        spawn(another_greenlet)
-
-        events.append('sending')
-        channel.send('hello')
-        events.append('sent hello')
-        channel.send('world')
-        events.append('sent world')
-
-        self.assertEqual(['sending', 'hello', 'sent hello', 'world', 'sent world'], events)
-
-    @silence_warnings
-    def test_wait(self):
-        sleep(0.1)
-        channel = coros.queue(0)
-        events = []
-
-        def another_greenlet():
-            events.append('sending hello')
-            channel.send('hello')
-            events.append('sending world')
-            channel.send('world')
-            events.append('sent world')
-
-        spawn(another_greenlet)
-
-        events.append('waiting')
-        events.append(channel.wait())
-        events.append(channel.wait())
-
-        self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world'], events)
-        sleep(0)
-        self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world', 'sent world'], events)
-
-    @silence_warnings
-    def test_waiters(self):
-        c = coros.Channel()
-        w1 = eventlet.spawn(c.wait)
-        w2 = eventlet.spawn(c.wait)
-        w3 = eventlet.spawn(c.wait)
-        sleep(0)
-        self.assertEqual(c.waiting(), 3)
-        s1 = eventlet.spawn(c.send, 1)
-        s2 = eventlet.spawn(c.send, 2)
-        s3 = eventlet.spawn(c.send, 3)
-        sleep(0)  # this gets all the sends into a waiting state
-        self.assertEqual(c.waiting(), 0)
-
-        s1.wait()
-        s2.wait()
-        s3.wait()
-        # NOTE: we don't guarantee that waiters are served in order
-        results = sorted([w1.wait(), w2.wait(), w3.wait()])
-        self.assertEqual(results, [1, 2, 3])
-
-if __name__ == '__main__':
-    main()
index 8bc721963c286f598a048ffa394cf2d3ac52d160..065e13bde5c4218469e79b8bdf71d5c19c35c408 100644 (file)
@@ -1,6 +1,6 @@
 import unittest
+from eventlet import spawn, sleep, with_timeout
 from eventlet.event import Event
-from eventlet.api import spawn, sleep, with_timeout
 import eventlet
 from tests import LimitedTestCase
 
diff --git a/eventlet/tests/test__pool.py b/eventlet/tests/test__pool.py
deleted file mode 100644 (file)
index 4191625..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-import eventlet
-import warnings
-warnings.simplefilter('ignore', DeprecationWarning)
-from eventlet import pool, coros, api, hubs, timeout
-warnings.simplefilter('default', DeprecationWarning)
-from eventlet import event as _event
-from eventlet.support import six
-from tests import LimitedTestCase
-from unittest import main
-
-
-class TestCoroutinePool(LimitedTestCase):
-    klass = pool.Pool
-
-    def test_execute_async(self):
-        done = _event.Event()
-
-        def some_work():
-            done.send()
-        pool = self.klass(0, 2)
-        pool.execute_async(some_work)
-        done.wait()
-
-    def test_execute(self):
-        value = 'return value'
-
-        def some_work():
-            return value
-        pool = self.klass(0, 2)
-        worker = pool.execute(some_work)
-        self.assertEqual(value, worker.wait())
-
-    def test_waiting(self):
-        pool = self.klass(0, 1)
-        done = _event.Event()
-
-        def consume():
-            done.wait()
-
-        def waiter(pool):
-            evt = pool.execute(consume)
-            evt.wait()
-
-        waiters = []
-        waiters.append(eventlet.spawn(waiter, pool))
-        api.sleep(0)
-        self.assertEqual(pool.waiting(), 0)
-        waiters.append(eventlet.spawn(waiter, pool))
-        api.sleep(0)
-        self.assertEqual(pool.waiting(), 1)
-        waiters.append(eventlet.spawn(waiter, pool))
-        api.sleep(0)
-        self.assertEqual(pool.waiting(), 2)
-        done.send(None)
-        for w in waiters:
-            w.wait()
-        self.assertEqual(pool.waiting(), 0)
-
-    def test_multiple_coros(self):
-        evt = _event.Event()
-        results = []
-
-        def producer():
-            results.append('prod')
-            evt.send()
-
-        def consumer():
-            results.append('cons1')
-            evt.wait()
-            results.append('cons2')
-
-        pool = self.klass(0, 2)
-        done = pool.execute(consumer)
-        pool.execute_async(producer)
-        done.wait()
-        self.assertEqual(['cons1', 'prod', 'cons2'], results)
-
-    def test_timer_cancel(self):
-        # this test verifies that local timers are not fired
-        # outside of the context of the execute method
-        timer_fired = []
-
-        def fire_timer():
-            timer_fired.append(True)
-
-        def some_work():
-            hubs.get_hub().schedule_call_local(0, fire_timer)
-        pool = self.klass(0, 2)
-        worker = pool.execute(some_work)
-        worker.wait()
-        api.sleep(0)
-        self.assertEqual(timer_fired, [])
-
-    def test_reentrant(self):
-        pool = self.klass(0, 1)
-
-        def reenter():
-            waiter = pool.execute(lambda a: a, 'reenter')
-            self.assertEqual('reenter', waiter.wait())
-
-        outer_waiter = pool.execute(reenter)
-        outer_waiter.wait()
-
-        evt = _event.Event()
-
-        def reenter_async():
-            pool.execute_async(lambda a: a, 'reenter')
-            evt.send('done')
-
-        pool.execute_async(reenter_async)
-        evt.wait()
-
-    def assert_pool_has_free(self, pool, num_free):
-        def wait_long_time(e):
-            e.wait()
-        timer = timeout.Timeout(1, api.TimeoutError)
-        try:
-            evt = _event.Event()
-            for x in six.moves.range(num_free):
-                pool.execute(wait_long_time, evt)
-                # if the pool has fewer free than we expect,
-                # then we'll hit the timeout error
-        finally:
-            timer.cancel()
-
-        # if the runtime error is not raised it means the pool had
-        # some unexpected free items
-        timer = timeout.Timeout(0, RuntimeError)
-        self.assertRaises(RuntimeError, pool.execute, wait_long_time, evt)
-
-        # clean up by causing all the wait_long_time functions to return
-        evt.send(None)
-        api.sleep(0)
-        api.sleep(0)
-
-    def test_resize(self):
-        pool = self.klass(max_size=2)
-        evt = _event.Event()
-
-        def wait_long_time(e):
-            e.wait()
-        pool.execute(wait_long_time, evt)
-        pool.execute(wait_long_time, evt)
-        self.assertEqual(pool.free(), 0)
-        self.assert_pool_has_free(pool, 0)
-
-        # verify that the pool discards excess items put into it
-        pool.resize(1)
-
-        # cause the wait_long_time functions to return, which will
-        # trigger puts to the pool
-        evt.send(None)
-        api.sleep(0)
-        api.sleep(0)
-
-        self.assertEqual(pool.free(), 1)
-        self.assert_pool_has_free(pool, 1)
-
-        # resize larger and assert that there are more free items
-        pool.resize(2)
-        self.assertEqual(pool.free(), 2)
-        self.assert_pool_has_free(pool, 2)
-
-    def test_stderr_raising(self):
-        # testing that really egregious errors in the error handling code
-        # (that prints tracebacks to stderr) don't cause the pool to lose
-        # any members
-        import sys
-        pool = self.klass(min_size=1, max_size=1)
-
-        def crash(*args, **kw):
-            raise RuntimeError("Whoa")
-
-        class FakeFile(object):
-            write = crash
-
-        # we're going to do this by causing the traceback.print_exc in
-        # safe_apply to raise an exception and thus exit _main_loop
-        normal_err = sys.stderr
-        try:
-            sys.stderr = FakeFile()
-            waiter = pool.execute(crash)
-            self.assertRaises(RuntimeError, waiter.wait)
-            # the pool should have something free at this point since the
-            # waiter returned
-            # pool.Pool change: if an exception is raised during execution of a link,
-            # the rest of the links are scheduled to be executed on the next hub iteration
-            # this introduces a delay in updating pool.sem which makes pool.free() report 0
-            # therefore, sleep:
-            api.sleep(0)
-            self.assertEqual(pool.free(), 1)
-            # shouldn't block when trying to get
-            t = timeout.Timeout(0.1)
-            try:
-                pool.execute(api.sleep, 1)
-            finally:
-                t.cancel()
-        finally:
-            sys.stderr = normal_err
-
-    def test_track_events(self):
-        pool = self.klass(track_events=True)
-        for x in range(6):
-            pool.execute(lambda n: n, x)
-        for y in range(6):
-            pool.wait()
-
-    def test_track_slow_event(self):
-        pool = self.klass(track_events=True)
-
-        def slow():
-            api.sleep(0.1)
-            return 'ok'
-        pool.execute(slow)
-        self.assertEqual(pool.wait(), 'ok')
-
-    def test_pool_smash(self):
-        # The premise is that a coroutine in a Pool tries to get a token out
-        # of a token pool but times out before getting the token.  We verify
-        # that neither pool is adversely affected by this situation.
-        from eventlet import pools
-        pool = self.klass(min_size=1, max_size=1)
-        tp = pools.TokenPool(max_size=1)
-        token = tp.get()  # empty pool
-
-        def do_receive(tp):
-            timeout.Timeout(0, RuntimeError())
-            try:
-                t = tp.get()
-                self.fail("Shouldn't have recieved anything from the pool")
-            except RuntimeError:
-                return 'timed out'
-
-        # the execute makes the token pool expect that coroutine, but then
-        # immediately cuts bait
-        e1 = pool.execute(do_receive, tp)
-        self.assertEqual(e1.wait(), 'timed out')
-
-        # the pool can get some random item back
-        def send_wakeup(tp):
-            tp.put('wakeup')
-        api.spawn(send_wakeup, tp)
-
-        # now we ask the pool to run something else, which should not
-        # be affected by the previous send at all
-        def resume():
-            return 'resumed'
-        e2 = pool.execute(resume)
-        self.assertEqual(e2.wait(), 'resumed')
-
-        # we should be able to get out the thing we put in there, too
-        self.assertEqual(tp.get(), 'wakeup')
-
-
-class PoolBasicTests(LimitedTestCase):
-    klass = pool.Pool
-
-    def test_execute_async(self):
-        p = self.klass(max_size=2)
-        self.assertEqual(p.free(), 2)
-        r = []
-
-        def foo(a):
-            r.append(a)
-        evt = p.execute(foo, 1)
-        self.assertEqual(p.free(), 1)
-        evt.wait()
-        self.assertEqual(r, [1])
-        api.sleep(0)
-        self.assertEqual(p.free(), 2)
-
-        # Once the pool is exhausted, calling an execute forces a yield.
-
-        p.execute_async(foo, 2)
-        self.assertEqual(1, p.free())
-        self.assertEqual(r, [1])
-
-        p.execute_async(foo, 3)
-        self.assertEqual(0, p.free())
-        self.assertEqual(r, [1])
-
-        p.execute_async(foo, 4)
-        self.assertEqual(r, [1, 2, 3])
-        api.sleep(0)
-        self.assertEqual(r, [1, 2, 3, 4])
-
-    def test_execute(self):
-        p = self.klass()
-        evt = p.execute(lambda a: ('foo', a), 1)
-        self.assertEqual(evt.wait(), ('foo', 1))
-
-    def test_with_intpool(self):
-        from eventlet import pools
-
-        class IntPool(pools.Pool):
-            def create(self):
-                self.current_integer = getattr(self, 'current_integer', 0) + 1
-                return self.current_integer
-
-        def subtest(intpool_size, pool_size, num_executes):
-            def run(int_pool):
-                token = int_pool.get()
-                api.sleep(0.0001)
-                int_pool.put(token)
-                return token
-
-            int_pool = IntPool(max_size=intpool_size)
-            pool = self.klass(max_size=pool_size)
-            for ix in six.moves.range(num_executes):
-                pool.execute(run, int_pool)
-            pool.waitall()
-
-        subtest(4, 7, 7)
-        subtest(50, 75, 100)
-        for isize in (20, 30, 40, 50):
-            for psize in (25, 35, 50):
-                subtest(isize, psize, psize)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/eventlet/tests/test__proc.py b/eventlet/tests/test__proc.py
deleted file mode 100644 (file)
index fb71a64..0000000
+++ /dev/null
@@ -1,401 +0,0 @@
-import sys
-import unittest
-import warnings
-warnings.simplefilter('ignore', DeprecationWarning)
-from eventlet import proc
-warnings.simplefilter('default', DeprecationWarning)
-from eventlet import coros
-from eventlet import event as _event
-from eventlet import Timeout, sleep, getcurrent, with_timeout
-from tests import LimitedTestCase, skipped, silence_warnings
-
-DELAY = 0.01
-
-
-class ExpectedError(Exception):
-    pass
-
-
-class TestLink_Signal(LimitedTestCase):
-
-    @silence_warnings
-    def test_send(self):
-        s = proc.Source()
-        q1, q2, q3 = coros.queue(), coros.queue(), coros.queue()
-        s.link_value(q1)
-        self.assertRaises(Timeout, s.wait, 0)
-        assert s.wait(0, None) is None
-        assert s.wait(0.001, None) is None
-        self.assertRaises(Timeout, s.wait, 0.001)
-        s.send(1)
-        assert not q1.ready()
-        assert s.wait() == 1
-        sleep(0)
-        assert q1.ready()
-        s.link_exception(q2)
-        s.link(q3)
-        assert not q2.ready()
-        sleep(0)
-        assert q3.ready()
-        assert s.wait() == 1
-
-    @silence_warnings
-    def test_send_exception(self):
-        s = proc.Source()
-        q1, q2, q3 = coros.queue(), coros.queue(), coros.queue()
-        s.link_exception(q1)
-        s.send_exception(OSError('hello'))
-        sleep(0)
-        assert q1.ready()
-        s.link_value(q2)
-        s.link(q3)
-        assert not q2.ready()
-        sleep(0)
-        assert q3.ready()
-        self.assertRaises(OSError, q1.wait)
-        self.assertRaises(OSError, q3.wait)
-        self.assertRaises(OSError, s.wait)
-
-
-class TestProc(LimitedTestCase):
-
-    def test_proc(self):
-        p = proc.spawn(lambda: 100)
-        receiver = proc.spawn(sleep, 1)
-        p.link(receiver)
-        self.assertRaises(proc.LinkedCompleted, receiver.wait)
-        receiver2 = proc.spawn(sleep, 1)
-        p.link(receiver2)
-        self.assertRaises(proc.LinkedCompleted, receiver2.wait)
-
-    def test_event(self):
-        p = proc.spawn(lambda: 100)
-        event = _event.Event()
-        p.link(event)
-        self.assertEqual(event.wait(), 100)
-
-        for i in range(3):
-            event2 = _event.Event()
-            p.link(event2)
-            self.assertEqual(event2.wait(), 100)
-
-    def test_current(self):
-        p = proc.spawn(lambda: 100)
-        p.link()
-        self.assertRaises(proc.LinkedCompleted, sleep, 0.1)
-
-
-class TestCase(LimitedTestCase):
-
-    def link(self, p, listener=None):
-        getattr(p, self.link_method)(listener)
-
-    def tearDown(self):
-        LimitedTestCase.tearDown(self)
-        self.p.unlink()
-
-    def set_links(self, p, first_time, kill_exc_type):
-        event = _event.Event()
-        self.link(p, event)
-
-        proc_flag = []
-
-        def receiver():
-            sleep(DELAY)
-            proc_flag.append('finished')
-        receiver = proc.spawn(receiver)
-        self.link(p, receiver)
-
-        queue = coros.queue(1)
-        self.link(p, queue)
-
-        try:
-            self.link(p)
-        except kill_exc_type:
-            if first_time:
-                raise
-        else:
-            assert first_time, 'not raising here only first time'
-
-        callback_flag = ['initial']
-        self.link(p, lambda *args: callback_flag.remove('initial'))
-
-        for _ in range(10):
-            self.link(p, _event.Event())
-            self.link(p, coros.queue(1))
-        return event, receiver, proc_flag, queue, callback_flag
-
-    def set_links_timeout(self, link):
-        # stuff that won't be touched
-        event = _event.Event()
-        link(event)
-
-        proc_finished_flag = []
-
-        def myproc():
-            sleep(10)
-            proc_finished_flag.append('finished')
-            return 555
-        myproc = proc.spawn(myproc)
-        link(myproc)
-
-        queue = coros.queue(0)
-        link(queue)
-        return event, myproc, proc_finished_flag, queue
-
-    def check_timed_out(self, event, myproc, proc_finished_flag, queue):
-        X = object()
-        assert with_timeout(DELAY, event.wait, timeout_value=X) is X
-        assert with_timeout(DELAY, queue.wait, timeout_value=X) is X
-        assert with_timeout(DELAY, proc.waitall, [myproc], timeout_value=X) is X
-        assert proc_finished_flag == [], proc_finished_flag
-
-
-class TestReturn_link(TestCase):
-    link_method = 'link'
-
-    def test_return(self):
-        def return25():
-            return 25
-        p = self.p = proc.spawn(return25)
-        self._test_return(p, True, 25, proc.LinkedCompleted, lambda: sleep(0))
-        # repeating the same with dead process
-        for _ in range(3):
-            self._test_return(p, False, 25, proc.LinkedCompleted, lambda: sleep(0))
-
-    def _test_return(self, p, first_time, result, kill_exc_type, action):
-        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)
-
-        # stuff that will time out because there's no unhandled exception:
-        xxxxx = self.set_links_timeout(p.link_exception)
-
-        try:
-            sleep(DELAY * 2)
-        except kill_exc_type:
-            assert first_time, 'raising here only first time'
-        else:
-            assert not first_time, 'Should not raise LinkedKilled here after first time'
-
-        assert not p, p
-
-        self.assertEqual(event.wait(), result)
-        self.assertEqual(queue.wait(), result)
-        self.assertRaises(kill_exc_type, receiver.wait)
-        self.assertRaises(kill_exc_type, proc.waitall, [receiver])
-
-        sleep(DELAY)
-        assert not proc_flag, proc_flag
-        assert not callback_flag, callback_flag
-
-        self.check_timed_out(*xxxxx)
-
-
-class TestReturn_link_value(TestReturn_link):
-    sync = False
-    link_method = 'link_value'
-
-
-class TestRaise_link(TestCase):
-    link_method = 'link'
-
-    def _test_raise(self, p, first_time, kill_exc_type):
-        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)
-        xxxxx = self.set_links_timeout(p.link_value)
-
-        try:
-            sleep(DELAY)
-        except kill_exc_type:
-            assert first_time, 'raising here only first time'
-        else:
-            assert not first_time, 'Should not raise LinkedKilled here after first time'
-
-        assert not p, p
-
-        self.assertRaises(ExpectedError, event.wait)
-        self.assertRaises(ExpectedError, queue.wait)
-        self.assertRaises(kill_exc_type, receiver.wait)
-        self.assertRaises(kill_exc_type, proc.waitall, [receiver])
-        sleep(DELAY)
-        assert not proc_flag, proc_flag
-        assert not callback_flag, callback_flag
-
-        self.check_timed_out(*xxxxx)
-
-    @silence_warnings
-    def test_raise(self):
-        p = self.p = proc.spawn(lambda: getcurrent().throw(ExpectedError('test_raise')))
-        self._test_raise(p, True, proc.LinkedFailed)
-        # repeating the same with dead process
-        for _ in range(3):
-            self._test_raise(p, False, proc.LinkedFailed)
-
-    def _test_kill(self, p, first_time, kill_exc_type):
-        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)
-        xxxxx = self.set_links_timeout(p.link_value)
-
-        p.kill()
-        try:
-            sleep(DELAY)
-        except kill_exc_type:
-            assert first_time, 'raising here only first time'
-        else:
-            assert not first_time, 'Should not raise LinkedKilled here after first time'
-
-        assert not p, p
-
-        self.assertRaises(proc.ProcExit, event.wait)
-        self.assertRaises(proc.ProcExit, queue.wait)
-        self.assertRaises(kill_exc_type, proc.waitall, [receiver])
-        self.assertRaises(kill_exc_type, receiver.wait)
-
-        sleep(DELAY)
-        assert not proc_flag, proc_flag
-        assert not callback_flag, callback_flag
-
-        self.check_timed_out(*xxxxx)
-
-    @silence_warnings
-    def test_kill(self):
-        p = self.p = proc.spawn(sleep, DELAY)
-        self._test_kill(p, True, proc.LinkedKilled)
-        # repeating the same with dead process
-        for _ in range(3):
-            self._test_kill(p, False, proc.LinkedKilled)
-
-
-class TestRaise_link_exception(TestRaise_link):
-    link_method = 'link_exception'
-
-
-class TestStuff(LimitedTestCase):
-
-    def test_wait_noerrors(self):
-        x = proc.spawn(lambda: 1)
-        y = proc.spawn(lambda: 2)
-        z = proc.spawn(lambda: 3)
-        self.assertEqual(proc.waitall([x, y, z]), [1, 2, 3])
-        e = _event.Event()
-        x.link(e)
-        self.assertEqual(e.wait(), 1)
-        x.unlink(e)
-        e = _event.Event()
-        x.link(e)
-        self.assertEqual(e.wait(), 1)
-        self.assertEqual([proc.waitall([X]) for X in [x, y, z]], [[1], [2], [3]])
-
-    # this test is timing-sensitive
-    @skipped
-    def test_wait_error(self):
-        def x():
-            sleep(DELAY)
-            return 1
-        x = proc.spawn(x)
-        z = proc.spawn(lambda: 3)
-        y = proc.spawn(lambda: getcurrent().throw(ExpectedError('test_wait_error')))
-        y.link(x)
-        x.link(y)
-        y.link(z)
-        z.link(y)
-        self.assertRaises(ExpectedError, proc.waitall, [x, y, z])
-        self.assertRaises(proc.LinkedFailed, proc.waitall, [x])
-        self.assertEqual(proc.waitall([z]), [3])
-        self.assertRaises(ExpectedError, proc.waitall, [y])
-
-    def test_wait_all_exception_order(self):
-        # if there're several exceptions raised, the earliest one must be raised by wait
-        def first():
-            sleep(0.1)
-            raise ExpectedError('first')
-        a = proc.spawn(first)
-        b = proc.spawn(lambda: getcurrent().throw(ExpectedError('second')))
-        try:
-            proc.waitall([a, b])
-        except ExpectedError as ex:
-            assert 'second' in str(ex), repr(str(ex))
-        sleep(0.2)   # sleep to ensure that the other timer is raised
-
-    def test_multiple_listeners_error(self):
-        # if there was an error while calling a callback
-        # it should not prevent the other listeners from being called
-        # also, all of the errors should be logged, check the output
-        # manually that they are
-        p = proc.spawn(lambda: 5)
-        results = []
-
-        def listener1(*args):
-            results.append(10)
-            raise ExpectedError('listener1')
-
-        def listener2(*args):
-            results.append(20)
-            raise ExpectedError('listener2')
-
-        def listener3(*args):
-            raise ExpectedError('listener3')
-        p.link(listener1)
-        p.link(listener2)
-        p.link(listener3)
-        sleep(DELAY * 10)
-        assert results in [[10, 20], [20, 10]], results
-
-        p = proc.spawn(lambda: getcurrent().throw(ExpectedError('test_multiple_listeners_error')))
-        results = []
-        p.link(listener1)
-        p.link(listener2)
-        p.link(listener3)
-        sleep(DELAY * 10)
-        assert results in [[10, 20], [20, 10]], results
-
-    def _test_multiple_listeners_error_unlink(self, p):
-        # notification must not happen after unlink even
-        # though notification process has been already started
-        results = []
-
-        def listener1(*args):
-            p.unlink(listener2)
-            results.append(5)
-            raise ExpectedError('listener1')
-
-        def listener2(*args):
-            p.unlink(listener1)
-            results.append(5)
-            raise ExpectedError('listener2')
-
-        def listener3(*args):
-            raise ExpectedError('listener3')
-        p.link(listener1)
-        p.link(listener2)
-        p.link(listener3)
-        sleep(DELAY * 10)
-        assert results == [5], results
-
-    def test_multiple_listeners_error_unlink_Proc(self):
-        p = proc.spawn(lambda: 5)
-        self._test_multiple_listeners_error_unlink(p)
-
-    def test_multiple_listeners_error_unlink_Source(self):
-        p = proc.Source()
-        proc.spawn(p.send, 6)
-        self._test_multiple_listeners_error_unlink(p)
-
-    def test_killing_unlinked(self):
-        e = _event.Event()
-
-        def func():
-            try:
-                raise ExpectedError('test_killing_unlinked')
-            except:
-                e.send_exception(*sys.exc_info())
-        p = proc.spawn_link(func)
-        try:
-            try:
-                e.wait()
-            except ExpectedError:
-                pass
-        finally:
-            p.unlink()  # this disables LinkedCompleted that otherwise would be raised by the next line
-        sleep(DELAY)
-
-
-if __name__ == '__main__':
-    unittest.main()
index 5228ea7b572cc40484a8613fb1865a1873787288..7832de00ecccbc58c324c30a1966aa980a24f020 100644 (file)
@@ -1,6 +1,5 @@
 import unittest
 import socket as _original_sock
-from eventlet import api
 from eventlet.green import socket
 
 
diff --git a/eventlet/tests/test__twistedutil.py b/eventlet/tests/test__twistedutil.py
deleted file mode 100644 (file)
index 08e88be..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-from tests import requires_twisted
-import unittest
-try:
-    from twisted.internet import reactor
-    from twisted.internet.error import DNSLookupError
-    from twisted.internet import defer
-    from twisted.python.failure import Failure
-    from eventlet.twistedutil import block_on
-except ImportError:
-    pass
-
-
-class Test(unittest.TestCase):
-    @requires_twisted
-    def test_block_on_success(self):
-        from twisted.internet import reactor
-        d = reactor.resolver.getHostByName('www.google.com')
-        ip = block_on(d)
-        assert len(ip.split('.')) == 4, ip
-        ip2 = block_on(d)
-        assert ip == ip2, (ip, ip2)
-
-    @requires_twisted
-    def test_block_on_fail(self):
-        from twisted.internet import reactor
-        d = reactor.resolver.getHostByName('xxx')
-        self.assertRaises(DNSLookupError, block_on, d)
-
-    @requires_twisted
-    def test_block_on_already_succeed(self):
-        d = defer.succeed('hey corotwine')
-        res = block_on(d)
-        assert res == 'hey corotwine', repr(res)
-
-    @requires_twisted
-    def test_block_on_already_failed(self):
-        d = defer.fail(Failure(ZeroDivisionError()))
-        self.assertRaises(ZeroDivisionError, block_on, d)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/eventlet/tests/test__twistedutil_protocol.py b/eventlet/tests/test__twistedutil_protocol.py
deleted file mode 100644 (file)
index 29a26cf..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-from tests import requires_twisted
-
-import unittest
-try:
-    from twisted.internet import reactor
-    from twisted.internet.error import ConnectionDone
-    import eventlet.twistedutil.protocol as pr
-    from eventlet.twistedutil.protocols.basic import LineOnlyReceiverTransport
-except ImportError:
-    # stub out some of the twisted dependencies so it at least imports
-    class dummy(object):
-        pass
-    pr = dummy()
-    pr.UnbufferedTransport = None
-    pr.GreenTransport = None
-    pr.GreenClientCreator = lambda *a, **k: None
-
-    class reactor(object):
-        pass
-
-from eventlet import spawn, sleep, with_timeout, spawn_after
-from eventlet.coros import Event
-
-try:
-    from eventlet.green import socket
-except SyntaxError:
-    socket = None
-
-DELAY = 0.01
-
-if socket is not None:
-    def setup_server_socket(self, delay=DELAY, port=0):
-        s = socket.socket()
-        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        s.bind(('127.0.0.1', port))
-        port = s.getsockname()[1]
-        s.listen(5)
-        s.settimeout(delay * 3)
-
-        def serve():
-            conn, addr = s.accept()
-            conn.settimeout(delay + 1)
-            try:
-                hello = conn.makefile().readline()[:-2]
-            except socket.timeout:
-                return
-            conn.sendall('you said %s. ' % hello)
-            sleep(delay)
-            conn.sendall('BYE')
-            sleep(delay)
-            # conn.close()
-        spawn(serve)
-        return port
-
-
-def setup_server_SpawnFactory(self, delay=DELAY, port=0):
-    def handle(conn):
-        port.stopListening()
-        try:
-            hello = conn.readline()
-        except ConnectionDone:
-            return
-        conn.write('you said %s. ' % hello)
-        sleep(delay)
-        conn.write('BYE')
-        sleep(delay)
-        conn.loseConnection()
-    port = reactor.listenTCP(0, pr.SpawnFactory(handle, LineOnlyReceiverTransport))
-    return port.getHost().port
-
-
-class TestCase(unittest.TestCase):
-    transportBufferSize = None
-
-    @property
-    def connector(self):
-        return pr.GreenClientCreator(reactor, self.gtransportClass, self.transportBufferSize)
-
-    @requires_twisted
-    def setUp(self):
-        port = self.setup_server()
-        self.conn = self.connector.connectTCP('127.0.0.1', port)
-        if self.transportBufferSize is not None:
-            self.assertEqual(self.transportBufferSize, self.conn.transport.bufferSize)
-
-
-class TestUnbufferedTransport(TestCase):
-    gtransportClass = pr.UnbufferedTransport
-    setup_server = setup_server_SpawnFactory
-
-    @requires_twisted
-    def test_full_read(self):
-        self.conn.write('hello\r\n')
-        self.assertEqual(self.conn.read(), 'you said hello. BYE')
-        self.assertEqual(self.conn.read(), '')
-        self.assertEqual(self.conn.read(), '')
-
-    @requires_twisted
-    def test_iterator(self):
-        self.conn.write('iterator\r\n')
-        self.assertEqual('you said iterator. BYE', ''.join(self.conn))
-
-
-class TestUnbufferedTransport_bufsize1(TestUnbufferedTransport):
-    transportBufferSize = 1
-    setup_server = setup_server_SpawnFactory
-
-
-class TestGreenTransport(TestUnbufferedTransport):
-    gtransportClass = pr.GreenTransport
-    setup_server = setup_server_SpawnFactory
-
-    @requires_twisted
-    def test_read(self):
-        self.conn.write('hello\r\n')
-        self.assertEqual(self.conn.read(9), 'you said ')
-        self.assertEqual(self.conn.read(999), 'hello. BYE')
-        self.assertEqual(self.conn.read(9), '')
-        self.assertEqual(self.conn.read(1), '')
-        self.assertEqual(self.conn.recv(9), '')
-        self.assertEqual(self.conn.recv(1), '')
-
-    @requires_twisted
-    def test_read2(self):
-        self.conn.write('world\r\n')
-        self.assertEqual(self.conn.read(), 'you said world. BYE')
-        self.assertEqual(self.conn.read(), '')
-        self.assertEqual(self.conn.recv(), '')
-
-    @requires_twisted
-    def test_iterator(self):
-        self.conn.write('iterator\r\n')
-        self.assertEqual('you said iterator. BYE', ''.join(self.conn))
-
-    _tests = [x for x in locals().keys() if x.startswith('test_')]
-
-    @requires_twisted
-    def test_resume_producing(self):
-        for test in self._tests:
-            self.setUp()
-            self.conn.resumeProducing()
-            getattr(self, test)()
-
-    @requires_twisted
-    def test_pause_producing(self):
-        self.conn.pauseProducing()
-        self.conn.write('hi\r\n')
-        result = with_timeout(DELAY * 10, self.conn.read, timeout_value='timed out')
-        self.assertEqual('timed out', result)
-
-    @requires_twisted
-    def test_pauseresume_producing(self):
-        self.conn.pauseProducing()
-        spawn_after(DELAY * 5, self.conn.resumeProducing)
-        self.conn.write('hi\r\n')
-        result = with_timeout(DELAY * 10, self.conn.read, timeout_value='timed out')
-        self.assertEqual('you said hi. BYE', result)
-
-
-class TestGreenTransport_bufsize1(TestGreenTransport):
-    transportBufferSize = 1
-
-# class TestGreenTransportError(TestCase):
-#     setup_server = setup_server_SpawnFactory
-#     gtransportClass = pr.GreenTransport
-#
-#     def test_read_error(self):
-#         self.conn.write('hello\r\n')
-# sleep(DELAY*1.5) # make sure the rest of data arrives
-#         try:
-#             1//0
-#         except:
-# self.conn.loseConnection(failure.Failure()) # does not work, why?
-#             spawn(self.conn._queue.send_exception, *sys.exc_info())
-#         self.assertEqual(self.conn.read(9), 'you said ')
-#         self.assertEqual(self.conn.read(7), 'hello. ')
-#         self.assertEqual(self.conn.read(9), 'BYE')
-#         self.assertRaises(ZeroDivisionError, self.conn.read, 9)
-#         self.assertEqual(self.conn.read(1), '')
-#         self.assertEqual(self.conn.read(1), '')
-#
-#     def test_recv_error(self):
-#         self.conn.write('hello')
-#         self.assertEqual('you said hello. ', self.conn.recv())
-# sleep(DELAY*1.5) # make sure the rest of data arrives
-#         try:
-#             1//0
-#         except:
-# self.conn.loseConnection(failure.Failure()) # does not work, why?
-#             spawn(self.conn._queue.send_exception, *sys.exc_info())
-#         self.assertEqual('BYE', self.conn.recv())
-#         self.assertRaises(ZeroDivisionError, self.conn.recv, 9)
-#         self.assertEqual('', self.conn.recv(1))
-#         self.assertEqual('', self.conn.recv())
-#
-
-if socket is not None:
-
-    class TestUnbufferedTransport_socketserver(TestUnbufferedTransport):
-        setup_server = setup_server_socket
-
-    class TestUnbufferedTransport_socketserver_bufsize1(TestUnbufferedTransport):
-        transportBufferSize = 1
-        setup_server = setup_server_socket
-
-    class TestGreenTransport_socketserver(TestGreenTransport):
-        setup_server = setup_server_socket
-
-    class TestGreenTransport_socketserver_bufsize1(TestGreenTransport):
-        transportBufferSize = 1
-        setup_server = setup_server_socket
-
-
-class TestTLSError(unittest.TestCase):
-    @requires_twisted
-    def test_server_connectionMade_never_called(self):
-        # trigger case when protocol instance is created,
-        # but it's connectionMade is never called
-        from gnutls.interfaces.twisted import X509Credentials
-        from gnutls.errors import GNUTLSError
-        cred = X509Credentials(None, None)
-        ev = Event()
-
-        def handle(conn):
-            ev.send("handle must not be called")
-        s = reactor.listenTLS(0, pr.SpawnFactory(handle, LineOnlyReceiverTransport), cred)
-        creator = pr.GreenClientCreator(reactor, LineOnlyReceiverTransport)
-        try:
-            conn = creator.connectTLS('127.0.0.1', s.getHost().port, cred)
-        except GNUTLSError:
-            pass
-        assert ev.poll() is None, repr(ev.poll())
-
-try:
-    import gnutls.interfaces.twisted
-except ImportError:
-    del TestTLSError
-
-
-@requires_twisted
-def main():
-    unittest.main()
-
-if __name__ == '__main__':
-    main()
index ec29fc6eb22c078d8905acabef713a2857b1f8ef..95d36b507b3fbf625502b8a57dd7c4a4f4d9f39b 100644 (file)
@@ -1,11 +1,14 @@
 import errno
 import struct
 
+from nose.tools import eq_
+
 import eventlet
 from eventlet import event
 from eventlet import websocket
 from eventlet.green import httplib
 from eventlet.green import socket
+from eventlet.support import six
 
 from tests.wsgi_test import _TestBase
 
@@ -51,7 +55,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
         # Now, miss off key
         headers = dict(kv.split(': ') for kv in [
@@ -67,7 +71,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
         # No Upgrade now
         headers = dict(kv.split(': ') for kv in [
@@ -82,7 +86,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
     def test_correct_upgrade_request_13(self):
         for http_connection in ['Upgrade', 'UpGrAdE', 'keep-alive, Upgrade']:
@@ -97,16 +101,16 @@ class TestWebSocket(_TestBase):
             ]
             sock = eventlet.connect(('localhost', self.port))
 
-            sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+            sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
             result = sock.recv(1024)
             # The server responds the correct Websocket handshake
             print('Connection string: %r' % http_connection)
-            self.assertEqual(result, '\r\n'.join([
+            self.assertEqual(result, six.b('\r\n'.join([
                 'HTTP/1.1 101 Switching Protocols',
                 'Upgrade: websocket',
                 'Connection: Upgrade',
                 'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=\r\n\r\n',
-            ]))
+            ])))
 
     def test_send_recv_13(self):
         connect = [
@@ -121,15 +125,15 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)
         ws = websocket.RFC6455WebSocket(sock, {}, client=True)
-        ws.send('hello')
-        assert ws.wait() == 'hello'
-        ws.send('hello world!\x01')
+        ws.send(b'hello')
+        eq_(ws.wait(), b'hello')
+        ws.send(b'hello world!\x01')
         ws.send(u'hello world again!')
-        assert ws.wait() == 'hello world!\x01'
-        assert ws.wait() == u'hello world again!'
+        eq_(ws.wait(), b'hello world!\x01')
+        eq_(ws.wait(), u'hello world again!')
         ws.close()
         eventlet.sleep(0.01)
 
@@ -160,7 +164,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)  # get the headers
         sock.close()  # close while the app is running
         done_with_request.wait()
@@ -193,7 +197,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)  # get the headers
         closeframe = struct.pack('!BBIH', 1 << 7 | 8, 1 << 7 | 2, 0, 1000)
         sock.sendall(closeframe)  # "Close the connection" packet.
@@ -227,8 +231,8 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)  # get the headers
-        sock.sendall('\x07\xff')  # Weird packet.
+        sock.sendall(b'\x07\xff')  # Weird packet.
         done_with_request.wait()
         assert not error_detected[0]
index bf05555ae7f9280e3f4bc25b1c697d85c4ac2462..2858da00bd0f0a30e964c22e7f1cbf2b7c192a80 100644 (file)
@@ -4,12 +4,11 @@ import socket
 import eventlet
 from eventlet import event
 from eventlet import greenio
-from eventlet import wsgi
 from eventlet.green import httplib
-from eventlet.green import urllib2
+from eventlet.support import six
 from eventlet.websocket import WebSocket, WebSocketWSGI
 
-from tests import mock, LimitedTestCase, certificate_file, private_key_file
+from tests import certificate_file, LimitedTestCase, mock, private_key_file
 from tests import skip_if_no_ssl
 from tests.wsgi_test import _TestBase
 
@@ -42,13 +41,10 @@ class TestWebSocket(_TestBase):
         self.site = wsapp
 
     def test_incorrect_headers(self):
-        def raiser():
-            try:
-                urllib2.urlopen("http://localhost:%s/echo" % self.port)
-            except urllib2.HTTPError as e:
-                self.assertEqual(e.code, 400)
-                raise
-        self.assertRaises(urllib2.HTTPError, raiser)
+        http = httplib.HTTPConnection('localhost', self.port)
+        http.request("GET", "/echo")
+        response = http.getresponse()
+        assert response.status == 400
 
     def test_incomplete_headers_75(self):
         headers = dict(kv.split(': ') for kv in [
@@ -64,7 +60,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
     def test_incomplete_headers_76(self):
         # First test: Missing Connection:
@@ -81,7 +77,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
         # Now, miss off key2
         headers = dict(kv.split(': ') for kv in [
@@ -99,7 +95,7 @@ class TestWebSocket(_TestBase):
 
         self.assertEqual(resp.status, 400)
         self.assertEqual(resp.getheader('connection'), 'close')
-        self.assertEqual(resp.read(), '')
+        self.assertEqual(resp.read(), b'')
 
     def test_correct_upgrade_request_75(self):
         connect = [
@@ -113,16 +109,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         result = sock.recv(1024)
         # The server responds the correct Websocket handshake
-        self.assertEqual(result, '\r\n'.join([
+        self.assertEqual(result, six.b('\r\n'.join([
             'HTTP/1.1 101 Web Socket Protocol Handshake',
             'Upgrade: WebSocket',
             'Connection: Upgrade',
             'WebSocket-Origin: http://localhost:%s' % self.port,
             'WebSocket-Location: ws://localhost:%s/echo\r\n\r\n' % self.port,
-        ]))
+        ])))
 
     def test_correct_upgrade_request_76(self):
         connect = [
@@ -138,17 +134,17 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         result = sock.recv(1024)
         # The server responds the correct Websocket handshake
-        self.assertEqual(result, '\r\n'.join([
+        self.assertEqual(result, six.b('\r\n'.join([
             'HTTP/1.1 101 WebSocket Protocol Handshake',
             'Upgrade: WebSocket',
             'Connection: Upgrade',
             'Sec-WebSocket-Origin: http://localhost:%s' % self.port,
             'Sec-WebSocket-Protocol: ws',
             'Sec-WebSocket-Location: ws://localhost:%s/echo\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.port,
-        ]))
+        ])))
 
     def test_query_string(self):
         # verify that the query string comes out the other side unscathed
@@ -165,16 +161,17 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         result = sock.recv(1024)
-        self.assertEqual(result, '\r\n'.join([
+        self.assertEqual(result, six.b('\r\n'.join([
             'HTTP/1.1 101 WebSocket Protocol Handshake',
             'Upgrade: WebSocket',
             'Connection: Upgrade',
             'Sec-WebSocket-Origin: http://localhost:%s' % self.port,
             'Sec-WebSocket-Protocol: ws',
-            'Sec-WebSocket-Location: ws://localhost:%s/echo?query_string\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.port,
-        ]))
+            'Sec-WebSocket-Location: '
+            'ws://localhost:%s/echo?query_string\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.port,
+        ])))
 
     def test_empty_query_string(self):
         # verify that a single trailing ? doesn't get nuked
@@ -191,16 +188,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         result = sock.recv(1024)
-        self.assertEqual(result, '\r\n'.join([
+        self.assertEqual(result, six.b('\r\n'.join([
             'HTTP/1.1 101 WebSocket Protocol Handshake',
             'Upgrade: WebSocket',
             'Connection: Upgrade',
             'Sec-WebSocket-Origin: http://localhost:%s' % self.port,
             'Sec-WebSocket-Protocol: ws',
             'Sec-WebSocket-Location: ws://localhost:%s/echo?\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.port,
-        ]))
+        ])))
 
     def test_sending_messages_to_websocket_75(self):
         connect = [
@@ -214,16 +211,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)
-        sock.sendall('\x00hello\xFF')
+        sock.sendall(b'\x00hello\xFF')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00hello\xff')
-        sock.sendall('\x00start')
+        self.assertEqual(result, b'\x00hello\xff')
+        sock.sendall(b'\x00start')
         eventlet.sleep(0.001)
-        sock.sendall(' end\xff')
+        sock.sendall(b' end\xff')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00start end\xff')
+        self.assertEqual(result, b'\x00start end\xff')
         sock.shutdown(socket.SHUT_RDWR)
         sock.close()
         eventlet.sleep(0.01)
@@ -242,16 +239,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         sock.recv(1024)
-        sock.sendall('\x00hello\xFF')
+        sock.sendall(b'\x00hello\xFF')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00hello\xff')
-        sock.sendall('\x00start')
+        self.assertEqual(result, b'\x00hello\xff')
+        sock.sendall(b'\x00start')
         eventlet.sleep(0.001)
-        sock.sendall(' end\xff')
+        sock.sendall(b' end\xff')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00start end\xff')
+        self.assertEqual(result, b'\x00start end\xff')
         sock.shutdown(socket.SHUT_RDWR)
         sock.close()
         eventlet.sleep(0.01)
@@ -268,16 +265,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         resp = sock.recv(1024)
-        headers, result = resp.split('\r\n\r\n')
-        msgs = [result.strip('\x00\xff')]
+        headers, result = resp.split(b'\r\n\r\n')
+        msgs = [result.strip(b'\x00\xff')]
         cnt = 10
         while cnt:
-            msgs.append(sock.recv(20).strip('\x00\xff'))
+            msgs.append(sock.recv(20).strip(b'\x00\xff'))
             cnt -= 1
         # Last item in msgs is an empty string
-        self.assertEqual(msgs[:-1], ['msg %d' % i for i in range(10)])
+        self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)])
 
     def test_getting_messages_from_websocket_76(self):
         connect = [
@@ -293,16 +290,16 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         resp = sock.recv(1024)
-        headers, result = resp.split('\r\n\r\n')
-        msgs = [result[16:].strip('\x00\xff')]
+        headers, result = resp.split(b'\r\n\r\n')
+        msgs = [result[16:].strip(b'\x00\xff')]
         cnt = 10
         while cnt:
-            msgs.append(sock.recv(20).strip('\x00\xff'))
+            msgs.append(sock.recv(20).strip(b'\x00\xff'))
             cnt -= 1
         # Last item in msgs is an empty string
-        self.assertEqual(msgs[:-1], ['msg %d' % i for i in range(10)])
+        self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)])
 
     def test_breaking_the_connection_75(self):
         error_detected = [False]
@@ -330,7 +327,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)  # get the headers
         sock.close()  # close while the app is running
         done_with_request.wait()
@@ -364,7 +361,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         sock.recv(1024)  # get the headers
         sock.close()  # close while the app is running
         done_with_request.wait()
@@ -398,9 +395,9 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         sock.recv(1024)  # get the headers
-        sock.sendall('\xff\x00')  # "Close the connection" packet.
+        sock.sendall(b'\xff\x00')  # "Close the connection" packet.
         done_with_request.wait()
         assert not error_detected[0]
 
@@ -432,9 +429,9 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         sock.recv(1024)  # get the headers
-        sock.sendall('\xef\x00')  # Weird packet.
+        sock.sendall(b'\xef\x00')  # Weird packet.
         done_with_request.wait()
         assert error_detected[0]
 
@@ -452,11 +449,11 @@ class TestWebSocket(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         resp = sock.recv(1024)
-        headers, result = resp.split('\r\n\r\n')
+        headers, result = resp.split(b'\r\n\r\n')
         # The remote server should have immediately closed the connection.
-        self.assertEqual(result[16:], '\xff\x00')
+        self.assertEqual(result[16:], b'\xff\x00')
 
     def test_app_socket_errors_75(self):
         error_detected = [False]
@@ -484,7 +481,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n'))
         sock.recv(1024)
         done_with_request.wait()
         assert error_detected[0]
@@ -517,7 +514,7 @@ class TestWebSocket(_TestBase):
         ]
         sock = eventlet.connect(
             ('localhost', self.port))
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
         sock.recv(1024)
         done_with_request.wait()
         assert error_detected[0]
@@ -547,21 +544,25 @@ class TestWebSocketSSL(_TestBase):
         sock = eventlet.wrap_ssl(eventlet.connect(
             ('localhost', self.port)))
 
-        sock.sendall('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')
-        first_resp = sock.recv(1024)
+        sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U'))
+        first_resp = b''
+        while b'\r\n\r\n' not in first_resp:
+            first_resp += sock.recv()
+            print('resp now:')
+            print(first_resp)
         # make sure it sets the wss: protocol on the location header
-        loc_line = [x for x in first_resp.split("\r\n")
-                    if x.lower().startswith('sec-websocket-location')][0]
-        self.assert_("wss://localhost" in loc_line,
+        loc_line = [x for x in first_resp.split(b"\r\n")
+                    if x.lower().startswith(b'sec-websocket-location')][0]
+        self.assert_(b"wss://localhost" in loc_line,
                      "Expecting wss protocol in location: %s" % loc_line)
-        sock.sendall('\x00hello\xFF')
+        sock.sendall(b'\x00hello\xFF')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00hello\xff')
-        sock.sendall('\x00start')
+        self.assertEqual(result, b'\x00hello\xff')
+        sock.sendall(b'\x00start')
         eventlet.sleep(0.001)
-        sock.sendall(' end\xff')
+        sock.sendall(b' end\xff')
         result = sock.recv(1024)
-        self.assertEqual(result, '\x00start end\xff')
+        self.assertEqual(result, b'\x00start end\xff')
         greenio.shutdown_safe(sock)
         sock.close()
         eventlet.sleep(0.01)
@@ -579,13 +580,13 @@ class TestWebSocketObject(LimitedTestCase):
 
     def test_recieve(self):
         ws = self.test_ws
-        ws.socket.recv.return_value = '\x00hello\xFF'
+        ws.socket.recv.return_value = b'\x00hello\xFF'
         self.assertEqual(ws.wait(), 'hello')
-        self.assertEqual(ws._buf, '')
+        self.assertEqual(ws._buf, b'')
         self.assertEqual(len(ws._msgs), 0)
-        ws.socket.recv.return_value = ''
+        ws.socket.recv.return_value = b''
         self.assertEqual(ws.wait(), None)
-        self.assertEqual(ws._buf, '')
+        self.assertEqual(ws._buf, b'')
         self.assertEqual(len(ws._msgs), 0)
 
     def test_send_to_ws(self):
index 4ebf13602bb9f92af67ccd48aab51d15c7d80c23..179881df4f4c545fbf14aa3046abb2b705adf768 100644 (file)
@@ -17,7 +17,7 @@ from eventlet.green import subprocess
 from eventlet import greenio
 from eventlet import greenthread
 from eventlet import support
-from eventlet.support import six
+from eventlet.support import bytes_to_str, capture_stderr, six
 from eventlet import tpool
 from eventlet import wsgi
 
@@ -36,17 +36,17 @@ HttpReadResult = collections.namedtuple(
 def hello_world(env, start_response):
     if env['PATH_INFO'] == 'notexist':
         start_response('404 Not Found', [('Content-type', 'text/plain')])
-        return ["not found"]
+        return [b"not found"]
 
     start_response('200 OK', [('Content-type', 'text/plain')])
-    return ["hello world"]
+    return [b"hello world"]
 
 
 def chunked_app(env, start_response):
     start_response('200 OK', [('Content-type', 'text/plain')])
-    yield "this"
-    yield "is"
-    yield "chunked"
+    yield b"this"
+    yield b"is"
+    yield b"chunked"
 
 
 def chunked_fail_app(environ, start_response):
@@ -56,8 +56,8 @@ def chunked_fail_app(environ, start_response):
     start_response('200 OK', headers)
 
     # We start streaming data just fine.
-    yield "The dwarves of yore made mighty spells,"
-    yield "While hammers fell like ringing bells"
+    yield b"The dwarves of yore made mighty spells,"
+    yield b"While hammers fell like ringing bells"
 
     # Then the back-end fails!
     try:
@@ -67,13 +67,13 @@ def chunked_fail_app(environ, start_response):
         return
 
     # So rest of the response data is not available.
-    yield "In places deep, where dark things sleep,"
-    yield "In hollow halls beneath the fells."
+    yield b"In places deep, where dark things sleep,"
+    yield b"In hollow halls beneath the fells."
 
 
 def big_chunks(env, start_response):
     start_response('200 OK', [('Content-type', 'text/plain')])
-    line = 'a' * 8192
+    line = b'a' * 8192
     for x in range(10):
         yield line
 
@@ -94,9 +94,9 @@ def chunked_post(env, start_response):
     if env['PATH_INFO'] == '/a':
         return [env['wsgi.input'].read()]
     elif env['PATH_INFO'] == '/b':
-        return [x for x in iter(lambda: env['wsgi.input'].read(4096), '')]
+        return [x for x in iter(lambda: env['wsgi.input'].read(4096), b'')]
     elif env['PATH_INFO'] == '/c':
-        return [x for x in iter(lambda: env['wsgi.input'].read(1), '')]
+        return [x for x in iter(lambda: env['wsgi.input'].read(1), b'')]
 
 
 def already_handled(env, start_response):
@@ -145,16 +145,41 @@ hello world
 """
 
 
+def recvall(socket_):
+    result = b''
+    while True:
+        chunk = socket_.recv()
+        result += chunk
+        if chunk == b'':
+            break
+
+    return result
+
+
 class ConnectionClosed(Exception):
     pass
 
 
+def send_expect_close(sock, buf):
+    # Some tests will induce behavior that causes the remote end to
+    # close the connection before all of the data has been written.
+    # With small kernel buffer sizes, this can cause an EPIPE error.
+    # Since the test expects an early close, this can be ignored.
+    try:
+        sock.sendall(buf)
+    except socket.error as exc:
+        if support.get_errno(exc) != errno.EPIPE:
+            raise
+
+
 def read_http(sock):
-    fd = sock.makefile()
+    fd = sock.makefile('rb')
     try:
-        response_line = fd.readline().rstrip('\r\n')
+        response_line = bytes_to_str(fd.readline().rstrip(b'\r\n'))
     except socket.error as exc:
-        if support.get_errno(exc) == 10053:
+        # TODO find out whether 54 is ok here or not, I see it when running tests
+        # on Python 3
+        if support.get_errno(exc) in (10053, 54):
             raise ConnectionClosed
         raise
     if not response_line:
@@ -163,7 +188,7 @@ def read_http(sock):
     header_lines = []
     while True:
         line = fd.readline()
-        if line == '\r\n':
+        if line == b'\r\n':
             break
         else:
             header_lines.append(line)
@@ -174,7 +199,7 @@ def read_http(sock):
         x = x.strip()
         if not x:
             continue
-        key, value = x.split(':', 1)
+        key, value = bytes_to_str(x).split(':', 1)
         key = key.rstrip()
         value = value.lstrip()
         key_lower = key.lower()
@@ -256,20 +281,20 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result = fd.read()
         fd.close()
         # The server responds with the maximum version it supports
-        assert result.startswith('HTTP'), result
-        assert result.endswith('hello world'), result
+        assert result.startswith(b'HTTP'), result
+        assert result.endswith(b'hello world'), result
 
     def test_002_keepalive(self):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         read_http(sock)
@@ -284,7 +309,7 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         cancel = eventlet.Timeout(1, RuntimeError)
@@ -296,7 +321,7 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         read_http(sock)
@@ -324,9 +349,8 @@ class TestHttpd(_TestBase):
             path_parts.append('path')
         path = '/'.join(path_parts)
         request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path
-        fd = sock.makefile('rw')
-        fd.write(request)
-        fd.flush()
+        send_expect_close(sock, request.encode())
+        fd = sock.makefile('rb')
         result = fd.readline()
         if result:
             # windows closes the socket before the data is flushed,
@@ -338,7 +362,7 @@ class TestHttpd(_TestBase):
     def test_007_get_arg(self):
         # define a new handler that does a get_arg as well as a read_body
         def new_app(env, start_response):
-            body = env['wsgi.input'].read()
+            body = bytes_to_str(env['wsgi.input'].read())
             a = cgi.parse_qs(body).get('a', [1])[0]
             start_response('200 OK', [('Content-type', 'text/plain')])
             return [six.b('a is %s, body is %s' % (a, body))]
@@ -352,20 +376,20 @@ class TestHttpd(_TestBase):
             'Content-Length: 3',
             '',
             'a=a'))
-        fd = sock.makefile('w')
-        fd.write(request)
+        fd = sock.makefile('wb')
+        fd.write(request.encode())
         fd.flush()
 
         # send some junk after the actual request
         fd.write(b'01234567890123456789')
         result = read_http(sock)
-        self.assertEqual(result.body, 'a is a, body is a=a')
+        self.assertEqual(result.body, b'a is a, body is a=a')
         fd.close()
 
     def test_008_correctresponse(self):
         sock = eventlet.connect(('localhost', self.port))
 
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result_200 = read_http(sock)
@@ -384,37 +408,37 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
-        assert 'Transfer-Encoding: chunked' in fd.read()
+        assert b'Transfer-Encoding: chunked' in fd.read()
 
     def test_010_no_chunked_http_1_0(self):
         self.site.application = chunked_app
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
-        assert 'Transfer-Encoding: chunked' not in fd.read()
+        assert b'Transfer-Encoding: chunked' not in fd.read()
 
     def test_011_multiple_chunks(self):
         self.site.application = big_chunks
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
-        headers = ''
+        headers = b''
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 headers += line
-        assert 'Transfer-Encoding: chunked' in headers
+        assert b'Transfer-Encoding: chunked' in headers
         chunks = 0
         chunklen = int(fd.readline(), 16)
         while chunklen:
@@ -425,7 +449,7 @@ class TestHttpd(_TestBase):
         assert chunks > 1
         response = fd.read()
         # Require a CRLF to close the message body
-        self.assertEqual(response, '\r\n')
+        self.assertEqual(response, b'\r\n')
 
     @tests.skip_if_no_ssl
     def test_012_ssl_server(self):
@@ -447,14 +471,14 @@ class TestHttpd(_TestBase):
         sock.write(
             b'POST /foo HTTP/1.1\r\nHost: localhost\r\n'
             b'Connection: close\r\nContent-length:3\r\n\r\nabc')
-        result = sock.read(8192)
-        self.assertEqual(result[-3:], 'abc')
+        result = recvall(sock)
+        assert result.endswith(b'abc')
 
     @tests.skip_if_no_ssl
     def test_013_empty_return(self):
         def wsgi_app(environ, start_response):
             start_response("200 OK", [])
-            return [""]
+            return [b""]
 
         certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
         private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
@@ -467,58 +491,58 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(('localhost', server_sock.getsockname()[1]))
         sock = eventlet.wrap_ssl(sock)
         sock.write(b'GET /foo HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
-        result = sock.read(8192)
-        self.assertEqual(result[-4:], '\r\n\r\n')
+        result = recvall(sock)
+        assert result[-4:] == b'\r\n\r\n'
 
     def test_014_chunked_post(self):
         self.site.application = chunked_post
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                  'Transfer-Encoding: chunked\r\n\r\n'
                  '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode())
         fd.flush()
         while True:
-            if fd.readline() == '\r\n':
+            if fd.readline() == b'\r\n':
                 break
         response = fd.read()
-        assert response == 'oh hai', 'invalid response %s' % response
+        assert response == b'oh hai', 'invalid response %s' % response
 
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write('PUT /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                  'Transfer-Encoding: chunked\r\n\r\n'
                  '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode())
         fd.flush()
         while True:
-            if fd.readline() == '\r\n':
+            if fd.readline() == b'\r\n':
                 break
         response = fd.read()
-        assert response == 'oh hai', 'invalid response %s' % response
+        assert response == b'oh hai', 'invalid response %s' % response
 
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write('PUT /c HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                  'Transfer-Encoding: chunked\r\n\r\n'
                  '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode())
         fd.flush()
         while True:
-            if fd.readline() == '\r\n':
+            if fd.readline() == b'\r\n':
                 break
         response = fd.read(8192)
-        assert response == 'oh hai', 'invalid response %s' % response
+        assert response == b'oh hai', 'invalid response %s' % response
 
     def test_015_write(self):
         self.site.application = use_write
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
         result1 = read_http(sock)
         assert 'content-length' in result1.headers_lower
 
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
         result2 = read_http(sock)
@@ -531,21 +555,21 @@ class TestHttpd(_TestBase):
         """
         def wsgi_app(environ, start_response):
             start_response('200 OK', [('Content-Length', '7')])
-            return ['testing']
+            return [b'testing']
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
         header_lines = []
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 header_lines.append(line)
         self.assertEqual(1, len(
-            [l for l in header_lines if l.lower().startswith('content-length')]))
+            [l for l in header_lines if l.lower().startswith(b'content-length')]))
 
     @tests.skip_if_no_ssl
     def test_017_ssl_zeroreturnerror(self):
@@ -588,7 +612,7 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
         fd.flush()
 
@@ -607,13 +631,13 @@ class TestHttpd(_TestBase):
         def use_fieldstorage(environ, start_response):
             cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
             start_response('200 OK', [('Content-type', 'text/plain')])
-            return ['hello!']
+            return [b'hello!']
 
         self.site.application = use_fieldstorage
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write('POST / HTTP/1.1\r\n'
                  'Host: localhost\r\n'
                  'Connection: close\r\n'
@@ -621,7 +645,7 @@ class TestHttpd(_TestBase):
                  '2\r\noh\r\n'
                  '4\r\n hai\r\n0\r\n\r\n'.encode())
         fd.flush()
-        assert 'hello!' in fd.read()
+        assert b'hello!' in fd.read()
 
     def test_020_x_forwarded_for(self):
         request_bytes = (
@@ -654,13 +678,13 @@ class TestHttpd(_TestBase):
         self.spawn_server(sock=server_sock_2)
         # do a single req/response to verify it's up
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result = fd.read(1024)
         fd.close()
-        assert result.startswith('HTTP'), result
-        assert result.endswith('hello world')
+        assert result.startswith(b'HTTP'), result
+        assert result.endswith(b'hello world'), result
 
         # shut down the server and verify the server_socket fd is still open,
         # but the actual socketobject passed in to wsgi.server is closed
@@ -673,13 +697,13 @@ class TestHttpd(_TestBase):
             self.assertEqual(support.get_errno(exc), errno.EBADF)
         self.spawn_server(sock=server_sock)
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result = fd.read(1024)
         fd.close()
-        assert result.startswith('HTTP'), result
-        assert result.endswith('hello world')
+        assert result.startswith(b'HTTP'), result
+        assert result.endswith(b'hello world'), result
 
     def test_021_environ_clobbering(self):
         def clobberin_time(environ, start_response):
@@ -695,13 +719,13 @@ class TestHttpd(_TestBase):
             return []
         self.site.application = clobberin_time
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write('GET / HTTP/1.1\r\n'
                  'Host: localhost\r\n'
                  'Connection: close\r\n'
                  '\r\n\r\n'.encode())
         fd.flush()
-        assert '200 OK' in fd.read()
+        assert b'200 OK' in fd.read()
 
     def test_022_custom_pool(self):
         # just test that it accepts the parameter for now
@@ -713,44 +737,44 @@ class TestHttpd(_TestBase):
         # this stuff is copied from test_001_server, could be better factored
         sock = eventlet.connect(
             ('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result = fd.read()
         fd.close()
-        assert result.startswith('HTTP'), result
-        assert result.endswith('hello world')
+        assert result.startswith(b'HTTP'), result
+        assert result.endswith(b'hello world'), result
 
     def test_023_bad_content_length(self):
         sock = eventlet.connect(
             ('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.0\r\nHost: localhost\r\nContent-length: argh\r\n\r\n')
         fd.flush()
         result = fd.read()
         fd.close()
-        assert result.startswith('HTTP'), result
-        assert '400 Bad Request' in result
-        assert '500' not in result
+        assert result.startswith(b'HTTP'), result
+        assert b'400 Bad Request' in result, result
+        assert b'500' not in result, result
 
     def test_024_expect_100_continue(self):
         def wsgi_app(environ, start_response):
             if int(environ['CONTENT_LENGTH']) > 1024:
                 start_response('417 Expectation Failed', [('Content-Length', '7')])
-                return ['failure']
+                return [b'failure']
             else:
                 text = environ['wsgi.input'].read()
                 start_response('200 OK', [('Content-Length', str(len(text)))])
                 return [text]
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\n'
                  b'Expect: 100-continue\r\n\r\n')
         fd.flush()
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed')
-        self.assertEqual(result.body, 'failure')
+        self.assertEqual(result.body, b'failure')
         fd.write(
             b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n'
             b'Expect: 100-continue\r\n\r\ntesting')
@@ -758,20 +782,20 @@ class TestHttpd(_TestBase):
         header_lines = []
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 header_lines.append(line)
-        assert header_lines[0].startswith('HTTP/1.1 100 Continue')
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
         header_lines = []
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 header_lines.append(line)
-        assert header_lines[0].startswith('HTTP/1.1 200 OK')
-        self.assertEqual(fd.read(7), 'testing')
+        assert header_lines[0].startswith(b'HTTP/1.1 200 OK')
+        assert fd.read(7) == b'testing'
         fd.close()
         sock.close()
 
@@ -779,7 +803,7 @@ class TestHttpd(_TestBase):
         def wsgi_app(environ, start_response):
             if int(environ['CONTENT_LENGTH']) > 1024:
                 start_response('417 Expectation Failed', [('Content-Length', '7')])
-                return ['failure']
+                return [b'failure']
             else:
                 environ['wsgi.input'].set_hundred_continue_response_headers(
                     [('Hundred-Continue-Header-1', 'H1'),
@@ -790,13 +814,13 @@ class TestHttpd(_TestBase):
                 return [text]
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\n'
                  b'Expect: 100-continue\r\n\r\n')
         fd.flush()
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed')
-        self.assertEqual(result.body, 'failure')
+        self.assertEqual(result.body, b'failure')
         fd.write(
             b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n'
             b'Expect: 100-continue\r\n\r\ntesting')
@@ -804,27 +828,181 @@ class TestHttpd(_TestBase):
         header_lines = []
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 header_lines.append(line.strip())
-        assert header_lines[0].startswith('HTTP/1.1 100 Continue')
-        headers = dict((k, v) for k, v in (h.split(': ', 1) for h in header_lines[1:]))
-        assert 'Hundred-Continue-Header-1' in headers
-        assert 'Hundred-Continue-Header-2' in headers
-        assert 'Hundred-Continue-Header-K' in headers
-        self.assertEqual('H1', headers['Hundred-Continue-Header-1'])
-        self.assertEqual('H2', headers['Hundred-Continue-Header-2'])
-        self.assertEqual('Hk', headers['Hundred-Continue-Header-K'])
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
+        headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:]))
+        assert b'Hundred-Continue-Header-1' in headers
+        assert b'Hundred-Continue-Header-2' in headers
+        assert b'Hundred-Continue-Header-K' in headers
+        self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1'])
+        self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2'])
+        self.assertEqual(b'Hk', headers[b'Hundred-Continue-Header-K'])
         header_lines = []
         while True:
             line = fd.readline()
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
             else:
                 header_lines.append(line)
-        assert header_lines[0].startswith('HTTP/1.1 200 OK')
-        self.assertEqual(fd.read(7), 'testing')
+        assert header_lines[0].startswith(b'HTTP/1.1 200 OK')
+        self.assertEqual(fd.read(7), b'testing')
+        fd.close()
+        sock.close()
+
+    def test_024b_expect_100_continue_with_headers_multiple_chunked(self):
+        def wsgi_app(environ, start_response):
+            environ['wsgi.input'].set_hundred_continue_response_headers(
+                [('Hundred-Continue-Header-1', 'H1'),
+                 ('Hundred-Continue-Header-2', 'H2')])
+            text = environ['wsgi.input'].read()
+
+            environ['wsgi.input'].set_hundred_continue_response_headers(
+                [('Hundred-Continue-Header-3', 'H3')])
+            environ['wsgi.input'].send_hundred_continue_response()
+
+            text += environ['wsgi.input'].read()
+
+            start_response('200 OK', [('Content-Length', str(len(text)))])
+            return [text]
+        self.site.application = wsgi_app
+        sock = eventlet.connect(('localhost', self.port))
+        fd = sock.makefile('rwb')
+        fd.write(b'PUT /a HTTP/1.1\r\n'
+                 b'Host: localhost\r\nConnection: close\r\n'
+                 b'Transfer-Encoding: chunked\r\n'
+                 b'Expect: 100-continue\r\n\r\n')
+        fd.flush()
+
+        # First 100-continue response: must carry the H1/H2 headers set above
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
+        headers = dict((k, v) for k, v in (h.split(b': ', 1)
+                                           for h in header_lines[1:]))
+        assert b'Hundred-Continue-Header-1' in headers
+        assert b'Hundred-Continue-Header-2' in headers
+        self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1'])
+        self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2'])
+
+        # Send first chunked body ("first message"), ended by a zero chunk
+        fd.write(b'5\r\nfirst\r\n8\r\n message\r\n0\r\n\r\n')
+        fd.flush()
+
+        # Second 100-continue response: must now carry only the H3 header
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
+        headers = dict((k, v) for k, v in (h.split(b': ', 1)
+                                           for h in header_lines[1:]))
+        assert b'Hundred-Continue-Header-3' in headers
+        self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3'])
+
+        # Send second chunked body (", second message"), ended by a zero chunk
+        fd.write(b'8\r\n, second\r\n8\r\n message\r\n0\r\n\r\n')
+        fd.flush()
+
+        # Final response: 200 OK echoing both bodies concatenated
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 200 OK')
+
+        self.assertEqual(fd.read(29), b'first message, second message')
+        fd.close()
+        sock.close()
+
+    def test_024c_expect_100_continue_with_headers_multiple_nonchunked(self):
+        def wsgi_app(environ, start_response):
+
+            environ['wsgi.input'].set_hundred_continue_response_headers(
+                [('Hundred-Continue-Header-1', 'H1'),
+                 ('Hundred-Continue-Header-2', 'H2')])
+            text = environ['wsgi.input'].read(13)
+
+            environ['wsgi.input'].set_hundred_continue_response_headers(
+                [('Hundred-Continue-Header-3', 'H3')])
+            environ['wsgi.input'].send_hundred_continue_response()
+
+            text += environ['wsgi.input'].read(16)
+
+            start_response('200 OK', [('Content-Length', str(len(text)))])
+            return [text]
+
+        self.site.application = wsgi_app
+        sock = eventlet.connect(('localhost', self.port))
+        fd = sock.makefile('rwb')
+        fd.write(b'PUT /a HTTP/1.1\r\n'
+                 b'Host: localhost\r\nConnection: close\r\n'
+                 b'Content-Length: 29\r\n'
+                 b'Expect: 100-continue\r\n\r\n')
+        fd.flush()
+
+        # First 100-continue response: must carry the H1/H2 headers set above
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
+        headers = dict((k, v) for k, v in (h.split(b': ', 1)
+                                           for h in header_lines[1:]))
+        assert b'Hundred-Continue-Header-1' in headers
+        assert b'Hundred-Continue-Header-2' in headers
+        self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1'])
+        self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2'])
+
+        # Send the first 13 bytes of the 29-byte Content-Length body
+        fd.write(b'first message')
+        fd.flush()
+
+        # Second 100-continue response: must now carry only the H3 header
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 100 Continue')
+        headers = dict((k, v) for k, v in (h.split(b': ', 1)
+                                           for h in header_lines[1:]))
+        assert b'Hundred-Continue-Header-3' in headers
+        self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3'])
+
+        # Send the remaining 16 bytes read by the second wsgi.input.read
+        fd.write(b', second message\r\n')
+        fd.flush()
+
+        # Final response: 200 OK echoing both reads concatenated
+        header_lines = []
+        while True:
+            line = fd.readline()
+            if line == b'\r\n':
+                break
+            else:
+                header_lines.append(line.strip())
+        assert header_lines[0].startswith(b'HTTP/1.1 200 OK')
+
+        self.assertEqual(fd.read(29), b'first message, second message')
         fd.close()
         sock.close()
 
@@ -833,11 +1011,8 @@ class TestHttpd(_TestBase):
         listener = greensocket.socket()
         listener.bind(('localhost', 0))
         # NOT calling listen, to trigger the error
-        self.logfile = six.StringIO()
-        self.spawn_server(sock=listener)
-        old_stderr = sys.stderr
-        try:
-            sys.stderr = self.logfile
+        with capture_stderr() as log:
+            self.spawn_server(sock=listener)
             eventlet.sleep(0)  # need to enter server loop
             try:
                 eventlet.connect(('localhost', self.port))
@@ -845,10 +1020,8 @@ class TestHttpd(_TestBase):
             except socket.error as exc:
                 self.assertEqual(support.get_errno(exc), errno.ECONNREFUSED)
 
-            log_content = self.logfile.getvalue()
-            assert 'Invalid argument' in log_content, log_content
-        finally:
-            sys.stderr = old_stderr
+        log_content = log.getvalue()
+        assert 'Invalid argument' in log_content, log_content
         debug.hub_exceptions(False)
 
     def test_026_log_format(self):
@@ -871,7 +1044,7 @@ class TestHttpd(_TestBase):
         result = read_http(sock)
         self.assertEqual(result.headers_lower['connection'], 'close')
         self.assertNotEqual(result.headers_lower.get('transfer-encoding'), 'chunked')
-        self.assertEqual(result.body, "thisischunked")
+        self.assertEqual(result.body, b"thisischunked")
 
     def test_minimum_chunk_size_parameter_leaves_httpprotocol_class_member_intact(self):
         start_size = wsgi.HttpProtocol.minimum_chunk_size
@@ -902,7 +1075,7 @@ class TestHttpd(_TestBase):
         self.assertEqual(result.body, expected_body)
 
         # verify that socket is closed by server
-        self.assertEqual(sock.recv(1), '')
+        self.assertEqual(sock.recv(1), b'')
 
     def test_026_http_10_nokeepalive(self):
         # verify that if an http/1.0 client sends connection: keep-alive
@@ -918,7 +1091,7 @@ class TestHttpd(_TestBase):
     def test_027_keepalive_chunked(self):
         self.site.application = chunked_post
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         common_suffix = (
             b'Host: localhost\r\nTransfer-Encoding: chunked\r\n\r\n' +
             b'10\r\n0123456789abcdef\r\n0\r\n\r\n')
@@ -967,9 +1140,9 @@ class TestHttpd(_TestBase):
             try:
                 client = ssl.wrap_socket(eventlet.connect(('localhost', port)))
                 client.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
-                result = client.read()
-                assert result.startswith('HTTP'), result
-                assert result.endswith('hello world')
+                result = recvall(client)
+                assert result.startswith(b'HTTP'), result
+                assert result.endswith(b'hello world')
             except ImportError:
                 pass  # TODO(openssl): should test with OpenSSL
             greenthread.kill(g)
@@ -995,13 +1168,13 @@ class TestHttpd(_TestBase):
                 env['eventlet.posthooks'].append(
                     (posthook1, (2,), {'multiplier': 3}))
                 start_response('200 OK', [('Content-Type', 'text/plain')])
-            yield ''
+            yield b''
         self.site.application = one_posthook_app
         sock = eventlet.connect(('localhost', self.port))
-        fp = sock.makefile('rw')
+        fp = sock.makefile('rwb')
         fp.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fp.flush()
-        self.assertEqual(fp.readline(), 'HTTP/1.1 200 OK\r\n')
+        self.assertEqual(fp.readline(), b'HTTP/1.1 200 OK\r\n')
         fp.close()
         sock.close()
         self.assertEqual(posthook1_count[0], 6)
@@ -1018,13 +1191,13 @@ class TestHttpd(_TestBase):
                 env['eventlet.posthooks'].append(
                     (posthook2, (100,), {'divisor': 4}))
                 start_response('200 OK', [('Content-Type', 'text/plain')])
-            yield ''
+            yield b''
         self.site.application = two_posthook_app
         sock = eventlet.connect(('localhost', self.port))
-        fp = sock.makefile('rw')
+        fp = sock.makefile('rwb')
         fp.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fp.flush()
-        self.assertEqual(fp.readline(), 'HTTP/1.1 200 OK\r\n')
+        self.assertEqual(fp.readline(), b'HTTP/1.1 200 OK\r\n')
         fp.close()
         sock.close()
         self.assertEqual(posthook1_count[0], 26)
@@ -1034,23 +1207,17 @@ class TestHttpd(_TestBase):
         sock = eventlet.connect(('localhost', self.port))
         request = 'GET / HTTP/1.0\r\nHost: localhost\r\nLong: %s\r\n\r\n' % \
             ('a' * 10000)
-        fd = sock.makefile('rw')
-        fd.write(request.encode())
-        fd.flush()
+        send_expect_close(sock, request.encode())
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.0 400 Header Line Too Long')
-        fd.close()
 
     def test_031_reject_large_headers(self):
         sock = eventlet.connect(('localhost', self.port))
-        headers = 'Name: Value\r\n' * 5050
+        headers = ('Name: %s\r\n' % ('a' * 7000,)) * 20
         request = 'GET / HTTP/1.0\r\nHost: localhost\r\n%s\r\n\r\n' % headers
-        fd = sock.makefile('rwb')
-        fd.write(request.encode())
-        fd.flush()
+        send_expect_close(sock, request.encode())
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.0 400 Headers Too Large')
-        fd.close()
 
     def test_032_wsgi_input_as_iterable(self):
         # https://bitbucket.org/eventlet/eventlet/issue/150
@@ -1065,12 +1232,12 @@ class TestHttpd(_TestBase):
                 yield chunk
 
         self.site.application = echo_by_iterating
-        upload_data = '123456789abcdef' * 100
+        upload_data = b'123456789abcdef' * 100
         request = (
             'POST / HTTP/1.0\r\n'
             'Host: localhost\r\n'
             'Content-Length: %i\r\n\r\n%s'
-        ) % (len(upload_data), upload_data)
+        ) % (len(upload_data), bytes_to_str(upload_data))
         sock = eventlet.connect(('localhost', self.port))
         fd = sock.makefile('rwb')
         fd.write(request.encode())
@@ -1083,26 +1250,26 @@ class TestHttpd(_TestBase):
     def test_zero_length_chunked_response(self):
         def zero_chunked_app(env, start_response):
             start_response('200 OK', [('Content-type', 'text/plain')])
-            yield ""
+            yield b""
 
         self.site.application = zero_chunked_app
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
-        response = fd.read().split('\r\n')
+        response = fd.read().split(b'\r\n')
         headers = []
         while True:
             h = response.pop(0)
             headers.append(h)
-            if h == '':
+            if h == b'':
                 break
-        assert 'Transfer-Encoding: chunked' in ''.join(headers)
+        assert b'Transfer-Encoding: chunked' in b''.join(headers), headers
         # should only be one chunk of zero size with two blank lines
         # (one terminates the chunk, one terminates the body)
-        self.assertEqual(response, ['0', '', ''])
+        self.assertEqual(response, [b'0', b'', b''])
 
     def test_configurable_url_length_limit(self):
         self.spawn_server(url_length_limit=20000)
@@ -1110,15 +1277,15 @@ class TestHttpd(_TestBase):
             ('localhost', self.port))
         path = 'x' * 15000
         request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path
-        fd = sock.makefile('rw')
-        fd.write(request)
+        fd = sock.makefile('rwb')
+        fd.write(request.encode())
         fd.flush()
         result = fd.readline()
         if result:
             # windows closes the socket before the data is flushed,
             # so we never get anything back
-            status = result.split(' ')[1]
-            self.assertEqual(status, '200')
+            status = result.split(b' ')[1]
+            self.assertEqual(status, b'200')
         fd.close()
 
     def test_aborted_chunked_post(self):
@@ -1130,7 +1297,7 @@ class TestHttpd(_TestBase):
                 content = env['wsgi.input'].read(1024)
             except IOError:
                 blew_up[0] = True
-                content = 'ok'
+                content = b'ok'
             read_content.send(content)
             start_response('200 OK', [('Content-Type', 'text/plain')])
             return [content]
@@ -1147,7 +1314,7 @@ class TestHttpd(_TestBase):
         sock.close()
         # the test passes if we successfully get here, and read all the data
         # in spite of the early close
-        self.assertEqual(read_content.wait(), 'ok')
+        self.assertEqual(read_content.wait(), b'ok')
         assert blew_up[0]
 
     def test_exceptions_close_connection(self):
@@ -1155,7 +1322,7 @@ class TestHttpd(_TestBase):
             raise RuntimeError("intentional error")
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result = read_http(sock)
@@ -1170,28 +1337,28 @@ class TestHttpd(_TestBase):
             yield u"non-encodable unicode: \u0230"
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 500 Internal Server Error')
         self.assertEqual(result.headers_lower['connection'], 'close')
-        assert 'unicode' in result.body
+        assert b'unicode' in result.body
 
     def test_path_info_decoding(self):
         def wsgi_app(environ, start_response):
             start_response("200 OK", [])
-            yield "decoded: %s" % environ['PATH_INFO']
-            yield "raw: %s" % environ['RAW_PATH_INFO']
+            yield six.b("decoded: %s" % environ['PATH_INFO'])
+            yield six.b("raw: %s" % environ['RAW_PATH_INFO'])
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET /a*b@%40%233 HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
         fd.flush()
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 200 OK')
-        assert 'decoded: /a*b@@#3' in result.body
-        assert 'raw: /a*b@%40%233' in result.body
+        assert b'decoded: /a*b@@#3' in result.body
+        assert b'raw: /a*b@%40%233' in result.body
 
     def test_ipv6(self):
         try:
@@ -1224,12 +1391,12 @@ class TestHttpd(_TestBase):
         self.site.application = crasher
 
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result1 = read_http(sock)
         self.assertEqual(result1.status, 'HTTP/1.1 500 Internal Server Error')
-        self.assertEqual(result1.body, '')
+        self.assertEqual(result1.body, b'')
         self.assertEqual(result1.headers_lower['connection'], 'close')
         assert 'transfer-encoding' not in result1.headers_lower
 
@@ -1237,14 +1404,14 @@ class TestHttpd(_TestBase):
         self.spawn_server(debug=True)
         self.site.application = crasher
         sock = eventlet.connect(('localhost', self.port))
-        fd = sock.makefile('w')
+        fd = sock.makefile('wb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
         fd.flush()
         result2 = read_http(sock)
         self.assertEqual(result2.status, 'HTTP/1.1 500 Internal Server Error')
-        assert 'intentional crash' in result2.body
-        assert 'RuntimeError' in result2.body
-        assert 'Traceback' in result2.body
+        assert b'intentional crash' in result2.body, result2.body
+        assert b'RuntimeError' in result2.body, result2.body
+        assert b'Traceback' in result2.body, result2.body
         self.assertEqual(result2.headers_lower['connection'], 'close')
         assert 'transfer-encoding' not in result2.headers_lower
 
@@ -1253,7 +1420,7 @@ class TestHttpd(_TestBase):
         """
         def long_response(environ, start_response):
             start_response('200 OK', [('Content-Length', '9876')])
-            yield 'a' * 9876
+            yield b'a' * 9876
 
         server_sock = eventlet.listen(('localhost', 0))
         self.port = server_sock.getsockname()[1]
@@ -1312,7 +1479,7 @@ class TestHttpd(_TestBase):
 
         def wsgi_app(environ, start_response):
             start_response('200 oK', [random_case_header])
-            return ['']
+            return [b'']
 
         self.spawn_server(site=wsgi_app, capitalize_response_headers=False)
 
@@ -1326,7 +1493,7 @@ class TestHttpd(_TestBase):
 
 
 def read_headers(sock):
-    fd = sock.makefile()
+    fd = sock.makefile('rb')
     try:
         response_line = fd.readline()
     except socket.error as exc:
@@ -1339,7 +1506,7 @@ def read_headers(sock):
     header_lines = []
     while True:
         line = fd.readline()
-        if line == '\r\n':
+        if line == b'\r\n':
             break
         else:
             header_lines.append(line)
@@ -1348,10 +1515,10 @@ def read_headers(sock):
         x = x.strip()
         if not x:
             continue
-        key, value = x.split(': ', 1)
+        key, value = x.split(b': ', 1)
         assert key.lower() not in headers, "%s header duplicated" % key
-        headers[key.lower()] = value
-    return response_line, headers
+        headers[bytes_to_str(key.lower())] = bytes_to_str(value)
+    return bytes_to_str(response_line), headers
 
 
 class IterableAlreadyHandledTest(_TestBase):
@@ -1366,7 +1533,7 @@ class IterableAlreadyHandledTest(_TestBase):
         sock = eventlet.connect(
             ('localhost', self.port))
 
-        fd = sock.makefile('rw')
+        fd = sock.makefile('rwb')
         fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
 
         fd.flush()
@@ -1378,7 +1545,7 @@ class IterableAlreadyHandledTest(_TestBase):
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 200 OK')
         self.assertEqual(result.headers_lower.get('transfer-encoding'), 'chunked')
-        self.assertEqual(result.body, '0\r\n\r\n')  # Still coming back chunked
+        self.assertEqual(result.body, b'0\r\n\r\n')  # Still coming back chunked
 
 
 class ProxiedIterableAlreadyHandledTest(IterableAlreadyHandledTest):
@@ -1418,13 +1585,13 @@ class TestChunkedInput(_TestBase):
             self.yield_next_space = False
 
             def response_iter():
-                yield ' '
+                yield b' '
                 num_sleeps = 0
                 while not self.yield_next_space and num_sleeps < 200:
                     eventlet.sleep(.01)
                     num_sleeps += 1
 
-                yield ' '
+                yield b' '
 
             start_response('200 OK',
                            [('Content-Type', 'text/plain'),
@@ -1458,7 +1625,7 @@ class TestChunkedInput(_TestBase):
 
     def ping(self, fd):
         fd.sendall(b"GET /ping HTTP/1.1\r\n\r\n")
-        self.assertEqual(read_http(fd).body, "pong")
+        self.assertEqual(read_http(fd).body, b"pong")
 
     def test_short_read_with_content_length(self):
         body = self.body()
@@ -1467,7 +1634,7 @@ class TestChunkedInput(_TestBase):
 
         fd = self.connect()
         fd.sendall(req.encode())
-        self.assertEqual(read_http(fd).body, "this is ch")
+        self.assertEqual(read_http(fd).body, b"this is ch")
 
         self.ping(fd)
         fd.close()
@@ -1478,7 +1645,7 @@ class TestChunkedInput(_TestBase):
               "Content-Length:0\r\n\r\n" + body
         fd = self.connect()
         fd.sendall(req.encode())
-        self.assertEqual(read_http(fd).body, "this is ch")
+        self.assertEqual(read_http(fd).body, b"this is ch")
 
         self.ping(fd)
         fd.close()
@@ -1489,7 +1656,7 @@ class TestChunkedInput(_TestBase):
 
         fd = self.connect()
         fd.sendall(req.encode())
-        self.assertEqual(read_http(fd).body, "this is ch")
+        self.assertEqual(read_http(fd).body, b"this is ch")
 
         self.ping(fd)
         fd.close()
@@ -1500,7 +1667,7 @@ class TestChunkedInput(_TestBase):
 
         fd = self.connect()
         fd.sendall(req.encode())
-        self.assertEqual(read_http(fd).body, "pong")
+        self.assertEqual(read_http(fd).body, b"pong")
 
         self.ping(fd)
         fd.close()
@@ -1512,7 +1679,7 @@ class TestChunkedInput(_TestBase):
 
         fd = self.connect()
         fd.sendall(req.encode())
-        self.assertEqual(read_http(fd).body, 'this is chunked\nline 2\nline3')
+        self.assertEqual(read_http(fd).body, b'this is chunked\nline 2\nline3')
         fd.close()
 
     def test_chunked_readline_wsgi_override_minimum_chunk_size(self):
@@ -1520,14 +1687,14 @@ class TestChunkedInput(_TestBase):
         fd = self.connect()
         fd.sendall(b"POST /yield_spaces/override_min HTTP/1.1\r\nContent-Length: 0\r\n\r\n")
 
-        resp_so_far = ''
+        resp_so_far = b''
         with eventlet.Timeout(.1):
             while True:
                 one_byte = fd.recv(1)
                 resp_so_far += one_byte
-                if resp_so_far.endswith('\r\n\r\n'):
+                if resp_so_far.endswith(b'\r\n\r\n'):
                     break
-            self.assertEqual(fd.recv(1), ' ')
+            self.assertEqual(fd.recv(1), b' ')
         try:
             with eventlet.Timeout(.1):
                 fd.recv(1)
@@ -1538,22 +1705,22 @@ class TestChunkedInput(_TestBase):
         self.yield_next_space = True
 
         with eventlet.Timeout(.1):
-            self.assertEqual(fd.recv(1), ' ')
+            self.assertEqual(fd.recv(1), b' ')
 
     def test_chunked_readline_wsgi_not_override_minimum_chunk_size(self):
 
         fd = self.connect()
         fd.sendall(b"POST /yield_spaces HTTP/1.1\r\nContent-Length: 0\r\n\r\n")
 
-        resp_so_far = ''
+        resp_so_far = b''
         try:
             with eventlet.Timeout(.1):
                 while True:
                     one_byte = fd.recv(1)
                     resp_so_far += one_byte
-                    if resp_so_far.endswith('\r\n\r\n'):
+                    if resp_so_far.endswith(b'\r\n\r\n'):
                         break
-                self.assertEqual(fd.recv(1), ' ')
+                self.assertEqual(fd.recv(1), b' ')
         except eventlet.Timeout:
             pass
         else:
index 91a690f62eb8fa75989af5019e2ac3421c45c5f1..d925a042b82544ba1624e72f1c97860bd71ce15c 100644 (file)
@@ -136,7 +136,7 @@ if __name__ == '__main__':
             # req #1 - normal
             sock1 = eventlet.connect(server_addr)
             sock1.settimeout(0.1)
-            fd1 = sock1.makefile('rw')
+            fd1 = sock1.makefile('rwb')
             fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
             fd1.flush()
             tests.wsgi_test.read_http(sock1)
diff --git a/tests/runtests.sh b/tests/runtests.sh
new file mode 100644 (file)
index 0000000..b49cd3a
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash -x
+RES=0
+case "$1" in
+  python-eventlet)
+    echo "Testing $1"
+    python -c "import eventlet"
+    RES=$?
+  ;;
+  *)
+    echo "test not defined, skipping..."
+  ;;
+esac
+exit $RES